path: root/deps/v8/src
author    Michaël Zasso <targos@protonmail.com>   2019-08-16 11:32:46 +0200
committer Michaël Zasso <targos@protonmail.com>   2019-08-19 09:25:23 +0200
commit    e31f0a7d25668d3c1531294d2ef44a9f3bde4ef4 (patch)
tree      6c6bed9804be9df6162b2483f0a56f371f66464d /deps/v8/src
parent    ec16fdae540adaf710b1a86c620170b2880088f0 (diff)
download  android-node-v8-e31f0a7d25668d3c1531294d2ef44a9f3bde4ef4.tar.gz
          android-node-v8-e31f0a7d25668d3c1531294d2ef44a9f3bde4ef4.tar.bz2
          android-node-v8-e31f0a7d25668d3c1531294d2ef44a9f3bde4ef4.zip
deps: update V8 to 7.7.299.4
PR-URL: https://github.com/nodejs/node/pull/28918
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/DEPS6
-rw-r--r--deps/v8/src/OWNERS10
-rw-r--r--deps/v8/src/api/OWNERS11
-rw-r--r--deps/v8/src/api/api-natives.cc33
-rw-r--r--deps/v8/src/api/api.cc428
-rw-r--r--deps/v8/src/api/api.h2
-rw-r--r--deps/v8/src/asmjs/OWNERS2
-rw-r--r--deps/v8/src/asmjs/asm-js.cc12
-rw-r--r--deps/v8/src/asmjs/asm-js.h2
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc26
-rw-r--r--deps/v8/src/asmjs/asm-parser.h6
-rw-r--r--deps/v8/src/ast/OWNERS2
-rw-r--r--deps/v8/src/ast/ast.cc15
-rw-r--r--deps/v8/src/ast/ast.h53
-rw-r--r--deps/v8/src/ast/modules.cc102
-rw-r--r--deps/v8/src/ast/modules.h16
-rw-r--r--deps/v8/src/ast/prettyprinter.cc26
-rw-r--r--deps/v8/src/ast/scopes.cc81
-rw-r--r--deps/v8/src/ast/scopes.h15
-rw-r--r--deps/v8/src/ast/variables.h53
-rw-r--r--deps/v8/src/base/adapters.h2
-rw-r--r--deps/v8/src/base/lsan.h29
-rw-r--r--deps/v8/src/base/memory.h (renamed from deps/v8/src/common/v8memory.h)30
-rw-r--r--deps/v8/src/base/platform/OWNERS2
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc4
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc6
-rw-r--r--deps/v8/src/base/small-vector.h23
-rw-r--r--deps/v8/src/base/vlq-base64.cc58
-rw-r--r--deps/v8/src/base/vlq-base64.h23
-rw-r--r--deps/v8/src/builtins/OWNERS3
-rw-r--r--deps/v8/src/builtins/accessors.cc5
-rw-r--r--deps/v8/src/builtins/arguments.tq4
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc43
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc39
-rw-r--r--deps/v8/src/builtins/array-copywithin.tq2
-rw-r--r--deps/v8/src/builtins/array-every.tq29
-rw-r--r--deps/v8/src/builtins/array-filter.tq31
-rw-r--r--deps/v8/src/builtins/array-find.tq34
-rw-r--r--deps/v8/src/builtins/array-findindex.tq35
-rw-r--r--deps/v8/src/builtins/array-foreach.tq30
-rw-r--r--deps/v8/src/builtins/array-join.tq39
-rw-r--r--deps/v8/src/builtins/array-lastindexof.tq4
-rw-r--r--deps/v8/src/builtins/array-map.tq49
-rw-r--r--deps/v8/src/builtins/array-of.tq5
-rw-r--r--deps/v8/src/builtins/array-reduce-right.tq53
-rw-r--r--deps/v8/src/builtins/array-reduce.tq50
-rw-r--r--deps/v8/src/builtins/array-reverse.tq2
-rw-r--r--deps/v8/src/builtins/array-shift.tq2
-rw-r--r--deps/v8/src/builtins/array-slice.tq9
-rw-r--r--deps/v8/src/builtins/array-some.tq30
-rw-r--r--deps/v8/src/builtins/array-splice.tq19
-rw-r--r--deps/v8/src/builtins/array-unshift.tq2
-rw-r--r--deps/v8/src/builtins/array.tq36
-rw-r--r--deps/v8/src/builtins/base.tq625
-rw-r--r--deps/v8/src/builtins/bigint.tq206
-rw-r--r--deps/v8/src/builtins/boolean.tq29
-rw-r--r--deps/v8/src/builtins/builtins-api.cc16
-rw-r--r--deps/v8/src/builtins/builtins-arguments-gen.cc3
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc17
-rw-r--r--deps/v8/src/builtins/builtins-array.cc5
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc41
-rw-r--r--deps/v8/src/builtins/builtins-bigint-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-bigint-gen.h80
-rw-r--r--deps/v8/src/builtins/builtins-bigint.cc4
-rw-r--r--deps/v8/src/builtins/builtins-boolean-gen.cc19
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc11
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc13
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc60
-rw-r--r--deps/v8/src/builtins/builtins-console.cc3
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc36
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.h12
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc24
-rw-r--r--deps/v8/src/builtins/builtins-data-view-gen.h8
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h19
-rw-r--r--deps/v8/src/builtins/builtins-error.cc9
-rw-r--r--deps/v8/src/builtins/builtins-global.cc18
-rw-r--r--deps/v8/src/builtins/builtins-handler-gen.cc38
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc117
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc40
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc31
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.h29
-rw-r--r--deps/v8/src/builtins/builtins-math.cc26
-rw-r--r--deps/v8/src/builtins/builtins-number-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-number.cc20
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc44
-rw-r--r--deps/v8/src/builtins/builtins-object.cc72
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.cc32
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc97
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.h29
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc127
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc141
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h36
-rw-r--r--deps/v8/src/builtins/builtins-regexp.cc2
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc48
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h3
-rw-r--r--deps/v8/src/builtins/builtins-symbol-gen.cc16
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc73
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h10
-rw-r--r--deps/v8/src/builtins/builtins-weak-refs.cc80
-rw-r--r--deps/v8/src/builtins/collections.tq2
-rw-r--r--deps/v8/src/builtins/data-view.tq383
-rw-r--r--deps/v8/src/builtins/extras-utils.tq7
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc46
-rw-r--r--deps/v8/src/builtins/internal-coverage.tq2
-rw-r--r--deps/v8/src/builtins/iterator.tq8
-rw-r--r--deps/v8/src/builtins/math.tq48
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc76
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc74
-rw-r--r--deps/v8/src/builtins/object-fromentries.tq7
-rw-r--r--deps/v8/src/builtins/object.tq138
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc195
-rw-r--r--deps/v8/src/builtins/proxy-constructor.tq9
-rw-r--r--deps/v8/src/builtins/proxy-delete-property.tq67
-rw-r--r--deps/v8/src/builtins/proxy-get-property.tq32
-rw-r--r--deps/v8/src/builtins/proxy-get-prototype-of.tq70
-rw-r--r--deps/v8/src/builtins/proxy-has-property.tq6
-rw-r--r--deps/v8/src/builtins/proxy-is-extensible.tq56
-rw-r--r--deps/v8/src/builtins/proxy-prevent-extensions.tq66
-rw-r--r--deps/v8/src/builtins/proxy-revocable.tq8
-rw-r--r--deps/v8/src/builtins/proxy-revoke.tq2
-rw-r--r--deps/v8/src/builtins/proxy-set-property.tq22
-rw-r--r--deps/v8/src/builtins/proxy-set-prototype-of.tq77
-rw-r--r--deps/v8/src/builtins/proxy.tq30
-rw-r--r--deps/v8/src/builtins/reflect.tq82
-rw-r--r--deps/v8/src/builtins/regexp-replace.tq11
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc188
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc25
-rw-r--r--deps/v8/src/builtins/string-endswith.tq9
-rw-r--r--deps/v8/src/builtins/string-html.tq47
-rw-r--r--deps/v8/src/builtins/string-iterator.tq10
-rw-r--r--deps/v8/src/builtins/string-repeat.tq2
-rw-r--r--deps/v8/src/builtins/string-slice.tq2
-rw-r--r--deps/v8/src/builtins/string-startswith.tq16
-rw-r--r--deps/v8/src/builtins/string-substring.tq2
-rw-r--r--deps/v8/src/builtins/string.tq32
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq158
-rw-r--r--deps/v8/src/builtins/typed-array-every.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-filter.tq2
-rw-r--r--deps/v8/src/builtins/typed-array-find.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-findindex.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-foreach.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-reduce.tq10
-rw-r--r--deps/v8/src/builtins/typed-array-reduceright.tq10
-rw-r--r--deps/v8/src/builtins/typed-array-slice.tq2
-rw-r--r--deps/v8/src/builtins/typed-array-some.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-subarray.tq3
-rw-r--r--deps/v8/src/builtins/typed-array.tq31
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc46
-rw-r--r--deps/v8/src/codegen/DEPS9
-rw-r--r--deps/v8/src/codegen/OWNERS8
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc13
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.h3
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc26
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h5
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64-inl.h130
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc423
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.h247
-rw-r--r--deps/v8/src/codegen/arm64/constants-arm64.h4
-rw-r--r--deps/v8/src/codegen/arm64/cpu-arm64.cc2
-rw-r--r--deps/v8/src/codegen/arm64/decoder-arm64.h2
-rw-r--r--deps/v8/src/codegen/arm64/instructions-arm64.h1
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc34
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h25
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h2
-rw-r--r--deps/v8/src/codegen/assembler.cc36
-rw-r--r--deps/v8/src/codegen/assembler.h33
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc871
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h391
-rw-r--r--deps/v8/src/codegen/compiler.cc155
-rw-r--r--deps/v8/src/codegen/compiler.h19
-rw-r--r--deps/v8/src/codegen/constant-pool.cc249
-rw-r--r--deps/v8/src/codegen/constant-pool.h190
-rw-r--r--deps/v8/src/codegen/cpu-features.h1
-rw-r--r--deps/v8/src/codegen/external-reference.cc37
-rw-r--r--deps/v8/src/codegen/external-reference.h7
-rw-r--r--deps/v8/src/codegen/handler-table.cc38
-rw-r--r--deps/v8/src/codegen/handler-table.h10
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc7
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h1
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc14
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h5
-rw-r--r--deps/v8/src/codegen/interface-descriptors.cc5
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h12
-rw-r--r--deps/v8/src/codegen/label.h2
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.cc3
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.h16
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc33
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h15
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc3
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.h4
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc33
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h15
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc12
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.h21
-rw-r--r--deps/v8/src/codegen/pending-optimization-table.cc97
-rw-r--r--deps/v8/src/codegen/pending-optimization-table.h44
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc22
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h27
-rw-r--r--deps/v8/src/codegen/ppc/code-stubs-ppc.cc28
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc39
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h5
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc23
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.h29
-rw-r--r--deps/v8/src/codegen/s390/code-stubs-s390.cc27
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc22
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h5
-rw-r--r--deps/v8/src/codegen/safepoint-table.h13
-rw-r--r--deps/v8/src/codegen/source-position-table.cc15
-rw-r--r--deps/v8/src/codegen/tick-counter.cc23
-rw-r--r--deps/v8/src/codegen/tick-counter.h28
-rw-r--r--deps/v8/src/codegen/turbo-assembler.h6
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h4
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc220
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h24
-rw-r--r--deps/v8/src/codegen/x64/constants-x64.h3
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc82
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h18
-rw-r--r--deps/v8/src/codegen/x64/sse-instr.h5
-rw-r--r--deps/v8/src/common/OWNERS3
-rw-r--r--deps/v8/src/common/globals.h37
-rw-r--r--deps/v8/src/common/message-template.h (renamed from deps/v8/src/execution/message-template.h)18
-rw-r--r--deps/v8/src/common/ptr-compr-inl.h31
-rw-r--r--deps/v8/src/compiler-dispatcher/OWNERS7
-rw-r--r--deps/v8/src/compiler/OWNERS5
-rw-r--r--deps/v8/src/compiler/STYLE29
-rw-r--r--deps/v8/src/compiler/access-builder.cc28
-rw-r--r--deps/v8/src/compiler/access-builder.h11
-rw-r--r--deps/v8/src/compiler/access-info.cc42
-rw-r--r--deps/v8/src/compiler/access-info.h35
-rw-r--r--deps/v8/src/compiler/add-type-assertions-reducer.cc51
-rw-r--r--deps/v8/src/compiler/add-type-assertions-reducer.h45
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc68
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-codes-arm.h1
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc1
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc9
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc73
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h1
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc1
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc80
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc4
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc64
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h3
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc3
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc9
-rw-r--r--deps/v8/src/compiler/backend/instruction-codes.h2
-rw-r--r--deps/v8/src/compiler/backend/instruction-scheduler.cc2
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector-impl.h12
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc105
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.h22
-rw-r--r--deps/v8/src/compiler/backend/instruction.cc5
-rw-r--r--deps/v8/src/compiler/backend/instruction.h6
-rw-r--r--deps/v8/src/compiler/backend/jump-threading.h13
-rw-r--r--deps/v8/src/compiler/backend/live-range-separator.cc24
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc70
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-codes-mips.h1
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc3
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc9
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc70
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h1
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc3
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc19
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc88
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h1
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc1
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc9
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.cc121
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.h36
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc63
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc33
-rw-r--r--deps/v8/src/compiler/backend/unwinding-info-writer.h1
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc352
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-codes-x64.h30
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc30
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc122
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc94
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.h45
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc324
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h11
-rw-r--r--deps/v8/src/compiler/code-assembler.cc32
-rw-r--r--deps/v8/src/compiler/code-assembler.h123
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc8
-rw-r--r--deps/v8/src/compiler/common-operator.cc12
-rw-r--r--deps/v8/src/compiler/common-operator.h1
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc56
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h15
-rw-r--r--deps/v8/src/compiler/compilation-dependency.h32
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.cc7
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.h7
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.cc336
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.h118
-rw-r--r--deps/v8/src/compiler/decompression-elimination.cc37
-rw-r--r--deps/v8/src/compiler/decompression-elimination.h5
-rw-r--r--deps/v8/src/compiler/diamond.h4
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc320
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc54
-rw-r--r--deps/v8/src/compiler/escape-analysis.h11
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc9
-rw-r--r--deps/v8/src/compiler/graph-assembler.h38
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc12
-rw-r--r--deps/v8/src/compiler/graph-reducer.h8
-rw-r--r--deps/v8/src/compiler/heap-refs.h906
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc37
-rw-r--r--deps/v8/src/compiler/int64-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc1265
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h17
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc20
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc2
-rw-r--r--deps/v8/src/compiler/js-graph.cc8
-rw-r--r--deps/v8/src/compiler/js-graph.h52
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc909
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h831
-rw-r--r--deps/v8/src/compiler/js-heap-copy-reducer.cc3
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc66
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h2
-rw-r--r--deps/v8/src/compiler/js-inlining.cc17
-rw-r--r--deps/v8/src/compiler/js-inlining.h3
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc320
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h35
-rw-r--r--deps/v8/src/compiler/js-operator.cc15
-rw-r--r--deps/v8/src/compiler/js-operator.h14
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc58
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc18
-rw-r--r--deps/v8/src/compiler/linkage.cc8
-rw-r--r--deps/v8/src/compiler/linkage.h2
-rw-r--r--deps/v8/src/compiler/load-elimination.cc28
-rw-r--r--deps/v8/src/compiler/load-elimination.h2
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc20
-rw-r--r--deps/v8/src/compiler/loop-analysis.h6
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc13
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc3
-rw-r--r--deps/v8/src/compiler/machine-operator.cc55
-rw-r--r--deps/v8/src/compiler/machine-operator.h42
-rw-r--r--deps/v8/src/compiler/map-inference.cc25
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc45
-rw-r--r--deps/v8/src/compiler/memory-optimizer.h6
-rw-r--r--deps/v8/src/compiler/node-properties.cc3
-rw-r--r--deps/v8/src/compiler/node-properties.h3
-rw-r--r--deps/v8/src/compiler/node.cc8
-rw-r--r--deps/v8/src/compiler/opcodes.h53
-rw-r--r--deps/v8/src/compiler/operation-typer.cc31
-rw-r--r--deps/v8/src/compiler/operation-typer.h6
-rw-r--r--deps/v8/src/compiler/pipeline.cc307
-rw-r--r--deps/v8/src/compiler/pipeline.h15
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc13
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc4
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h5
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.cc3
-rw-r--r--deps/v8/src/compiler/representation-change.cc192
-rw-r--r--deps/v8/src/compiler/representation-change.h51
-rw-r--r--deps/v8/src/compiler/scheduler.cc38
-rw-r--r--deps/v8/src/compiler/scheduler.h10
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.cc1402
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.h329
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc29
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.h9
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc175
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h8
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc112
-rw-r--r--deps/v8/src/compiler/simplified-operator.h29
-rw-r--r--deps/v8/src/compiler/state-values-utils.cc8
-rw-r--r--deps/v8/src/compiler/state-values-utils.h4
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc16
-rw-r--r--deps/v8/src/compiler/store-store-elimination.h6
-rw-r--r--deps/v8/src/compiler/typer.cc30
-rw-r--r--deps/v8/src/compiler/typer.h7
-rw-r--r--deps/v8/src/compiler/types.cc14
-rw-r--r--deps/v8/src/compiler/types.h3
-rw-r--r--deps/v8/src/compiler/verifier.cc46
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc712
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h51
-rw-r--r--deps/v8/src/d8/d8.cc87
-rw-r--r--deps/v8/src/date/OWNERS3
-rw-r--r--deps/v8/src/debug/OWNERS2
-rw-r--r--deps/v8/src/debug/debug-coverage.cc15
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc9
-rw-r--r--deps/v8/src/debug/debug-evaluate.h5
-rw-r--r--deps/v8/src/debug/debug-frames.cc15
-rw-r--r--deps/v8/src/debug/debug-frames.h11
-rw-r--r--deps/v8/src/debug/debug-interface.h5
-rw-r--r--deps/v8/src/debug/debug-scope-iterator.h1
-rw-r--r--deps/v8/src/debug/debug-scopes.cc60
-rw-r--r--deps/v8/src/debug/debug-scopes.h2
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc5
-rw-r--r--deps/v8/src/debug/debug.cc20
-rw-r--r--deps/v8/src/debug/debug.h11
-rw-r--r--deps/v8/src/debug/liveedit.cc40
-rw-r--r--deps/v8/src/deoptimizer/OWNERS2
-rw-r--r--deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc2
-rw-r--r--deps/v8/src/deoptimizer/deoptimize-reason.h1
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc90
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h26
-rw-r--r--deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc10
-rw-r--r--deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc2
-rw-r--r--deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc2
-rw-r--r--deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc42
-rw-r--r--deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc2
-rw-r--r--deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc10
-rw-r--r--deps/v8/src/diagnostics/DEPS3
-rw-r--r--deps/v8/src/diagnostics/eh-frame.cc8
-rw-r--r--deps/v8/src/diagnostics/eh-frame.h14
-rw-r--r--deps/v8/src/diagnostics/gdb-jit.cc87
-rw-r--r--deps/v8/src/diagnostics/ia32/disasm-ia32.cc124
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc294
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc82
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc31
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc108
-rw-r--r--deps/v8/src/execution/OWNERS5
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.cc4
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.cc6
-rw-r--r--deps/v8/src/execution/execution.cc350
-rw-r--r--deps/v8/src/execution/execution.h177
-rw-r--r--deps/v8/src/execution/frame-constants.h9
-rw-r--r--deps/v8/src/execution/frames-inl.h33
-rw-r--r--deps/v8/src/execution/frames.cc46
-rw-r--r--deps/v8/src/execution/frames.h25
-rw-r--r--deps/v8/src/execution/ia32/frame-constants-ia32.cc6
-rw-r--r--deps/v8/src/execution/interrupts-scope.cc42
-rw-r--r--deps/v8/src/execution/interrupts-scope.h72
-rw-r--r--deps/v8/src/execution/isolate-data.h8
-rw-r--r--deps/v8/src/execution/isolate-inl.h7
-rw-r--r--deps/v8/src/execution/isolate-utils-inl.h64
-rw-r--r--deps/v8/src/execution/isolate-utils.h31
-rw-r--r--deps/v8/src/execution/isolate.cc231
-rw-r--r--deps/v8/src/execution/isolate.h83
-rw-r--r--deps/v8/src/execution/messages.cc312
-rw-r--r--deps/v8/src/execution/messages.h21
-rw-r--r--deps/v8/src/execution/microtask-queue.cc2
-rw-r--r--deps/v8/src/execution/mips/frame-constants-mips.cc7
-rw-r--r--deps/v8/src/execution/mips/simulator-mips.cc372
-rw-r--r--deps/v8/src/execution/mips/simulator-mips.h18
-rw-r--r--deps/v8/src/execution/mips64/frame-constants-mips64.cc3
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.cc372
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.h20
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc2
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc90
-rw-r--r--deps/v8/src/execution/stack-guard.cc345
-rw-r--r--deps/v8/src/execution/stack-guard.h186
-rw-r--r--deps/v8/src/execution/x64/frame-constants-x64.cc1
-rw-r--r--deps/v8/src/extensions/OWNERS1
-rw-r--r--deps/v8/src/extensions/cputracemark-extension.cc56
-rw-r--r--deps/v8/src/extensions/cputracemark-extension.h38
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc7
-rw-r--r--deps/v8/src/flags/OWNERS1
-rw-r--r--deps/v8/src/flags/flag-definitions.h81
-rw-r--r--deps/v8/src/handles/OWNERS3
-rw-r--r--deps/v8/src/handles/handles.cc4
-rw-r--r--deps/v8/src/heap/OWNERS2
-rw-r--r--deps/v8/src/heap/array-buffer-tracker-inl.h2
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h2
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.cc54
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.h229
-rw-r--r--deps/v8/src/heap/code-stats.cc6
-rw-r--r--deps/v8/src/heap/combined-heap.cc10
-rw-r--r--deps/v8/src/heap/combined-heap.h20
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc30
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc6
-rw-r--r--deps/v8/src/heap/embedder-tracing.h21
-rw-r--r--deps/v8/src/heap/factory-inl.h9
-rw-r--r--deps/v8/src/heap/factory.cc280
-rw-r--r--deps/v8/src/heap/factory.h48
-rw-r--r--deps/v8/src/heap/gc-tracer.cc29
-rw-r--r--deps/v8/src/heap/gc-tracer.h2
-rw-r--r--deps/v8/src/heap/heap-controller.cc21
-rw-r--r--deps/v8/src/heap/heap-controller.h7
-rw-r--r--deps/v8/src/heap/heap-inl.h17
-rw-r--r--deps/v8/src/heap/heap-write-barrier-inl.h48
-rw-r--r--deps/v8/src/heap/heap-write-barrier.h2
-rw-r--r--deps/v8/src/heap/heap.cc649
-rw-r--r--deps/v8/src/heap/heap.h214
-rw-r--r--deps/v8/src/heap/incremental-marking.cc39
-rw-r--r--deps/v8/src/heap/incremental-marking.h12
-rw-r--r--deps/v8/src/heap/item-parallel-job.cc7
-rw-r--r--deps/v8/src/heap/item-parallel-job.h6
-rw-r--r--deps/v8/src/heap/mark-compact.cc194
-rw-r--r--deps/v8/src/heap/object-stats.cc2
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h28
-rw-r--r--deps/v8/src/heap/objects-visiting.h5
-rw-r--r--deps/v8/src/heap/read-only-heap-inl.h31
-rw-r--r--deps/v8/src/heap/read-only-heap.cc85
-rw-r--r--deps/v8/src/heap/read-only-heap.h23
-rw-r--r--deps/v8/src/heap/remembered-set.h4
-rw-r--r--deps/v8/src/heap/scavenger-inl.h24
-rw-r--r--deps/v8/src/heap/scavenger.cc22
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc16
-rw-r--r--deps/v8/src/heap/spaces-inl.h36
-rw-r--r--deps/v8/src/heap/spaces.cc574
-rw-r--r--deps/v8/src/heap/spaces.h1103
-rw-r--r--deps/v8/src/heap/store-buffer.cc11
-rw-r--r--deps/v8/src/heap/stress-marking-observer.cc8
-rw-r--r--deps/v8/src/heap/stress-marking-observer.h4
-rw-r--r--deps/v8/src/heap/stress-scavenge-observer.cc24
-rw-r--r--deps/v8/src/heap/stress-scavenge-observer.h4
-rw-r--r--deps/v8/src/heap/sweeper.cc12
-rw-r--r--deps/v8/src/ic/OWNERS2
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc741
-rw-r--r--deps/v8/src/ic/accessor-assembler.h113
-rw-r--r--deps/v8/src/ic/binary-op-assembler.cc28
-rw-r--r--deps/v8/src/ic/call-optimization.cc3
-rw-r--r--deps/v8/src/ic/ic-inl.h24
-rw-r--r--deps/v8/src/ic/ic.cc286
-rw-r--r--deps/v8/src/ic/ic.h37
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc87
-rw-r--r--deps/v8/src/ic/stub-cache.cc46
-rw-r--r--deps/v8/src/ic/stub-cache.h14
-rw-r--r--deps/v8/src/init/OWNERS11
-rw-r--r--deps/v8/src/init/bootstrapper.cc162
-rw-r--r--deps/v8/src/init/heap-symbols.h15
-rw-r--r--deps/v8/src/init/isolate-allocator.cc2
-rw-r--r--deps/v8/src/init/setup-isolate-deserialize.cc1
-rw-r--r--deps/v8/src/inspector/BUILD.gn15
-rw-r--r--deps/v8/src/inspector/DEPS3
-rw-r--r--deps/v8/src/inspector/OWNERS8
-rw-r--r--deps/v8/src/inspector/injected-script.cc42
-rw-r--r--deps/v8/src/inspector/inspector_protocol_config.json10
-rw-r--r--deps/v8/src/inspector/js_protocol-1.2.json997
-rw-r--r--deps/v8/src/inspector/js_protocol-1.3.json1205
-rw-r--r--deps/v8/src/inspector/js_protocol.pdl1492
-rw-r--r--deps/v8/src/inspector/string-16.cc17
-rw-r--r--deps/v8/src/inspector/string-16.h15
-rw-r--r--deps/v8/src/inspector/string-util.cc6
-rw-r--r--deps/v8/src/inspector/string-util.h25
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc21
-rw-r--r--deps/v8/src/inspector/v8-console.cc10
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc54
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc4
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc3
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc31
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc93
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.h3
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc92
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc15
-rw-r--r--deps/v8/src/inspector/v8-schema-agent-impl.cc9
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc15
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h2
-rw-r--r--deps/v8/src/inspector/v8-string-conversions.cc7
-rw-r--r--deps/v8/src/inspector/value-mirror.cc86
-rw-r--r--deps/v8/src/interpreter/OWNERS2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc92
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h33
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc4
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.cc12
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.h7
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc12
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.cc10
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc169
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h17
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.h3
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc6
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h2
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc23
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc6
-rw-r--r--deps/v8/src/interpreter/interpreter.cc17
-rw-r--r--deps/v8/src/interpreter/interpreter.h6
-rw-r--r--deps/v8/src/json/OWNERS3
-rw-r--r--deps/v8/src/json/json-parser.cc4
-rw-r--r--deps/v8/src/json/json-stringifier.cc24
-rw-r--r--deps/v8/src/libplatform/tracing/OWNERS1
-rw-r--r--deps/v8/src/libplatform/tracing/json-trace-event-listener.cc4
-rw-r--r--deps/v8/src/libplatform/tracing/json-trace-event-listener.h5
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-consumer.cc44
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-consumer.h80
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-producer.cc45
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-producer.h70
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc28
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-shared-memory.h45
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tasks.cc52
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tasks.h55
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc130
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h86
-rw-r--r--deps/v8/src/libplatform/tracing/trace-event-listener.cc27
-rw-r--r--deps/v8/src/libplatform/tracing/trace-event-listener.h9
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc147
-rw-r--r--deps/v8/src/libsampler/OWNERS3
-rw-r--r--deps/v8/src/logging/counters-definitions.h9
-rw-r--r--deps/v8/src/logging/counters.h4
-rw-r--r--deps/v8/src/logging/log.cc57
-rw-r--r--deps/v8/src/numbers/OWNERS6
-rw-r--r--deps/v8/src/numbers/conversions.cc4
-rw-r--r--deps/v8/src/objects/OWNERS3
-rw-r--r--deps/v8/src/objects/api-callbacks-inl.h8
-rw-r--r--deps/v8/src/objects/api-callbacks.h16
-rw-r--r--deps/v8/src/objects/arguments-inl.h10
-rw-r--r--deps/v8/src/objects/arguments.h4
-rw-r--r--deps/v8/src/objects/bigint.cc183
-rw-r--r--deps/v8/src/objects/bigint.h10
-rw-r--r--deps/v8/src/objects/code-inl.h50
-rw-r--r--deps/v8/src/objects/code.cc22
-rw-r--r--deps/v8/src/objects/code.h50
-rw-r--r--deps/v8/src/objects/compressed-slots-inl.h54
-rw-r--r--deps/v8/src/objects/compressed-slots.h29
-rw-r--r--deps/v8/src/objects/contexts-inl.h8
-rw-r--r--deps/v8/src/objects/contexts.cc40
-rw-r--r--deps/v8/src/objects/contexts.h52
-rw-r--r--deps/v8/src/objects/descriptor-array-inl.h114
-rw-r--r--deps/v8/src/objects/descriptor-array.h45
-rw-r--r--deps/v8/src/objects/dictionary-inl.h37
-rw-r--r--deps/v8/src/objects/dictionary.h11
-rw-r--r--deps/v8/src/objects/elements.cc251
-rw-r--r--deps/v8/src/objects/elements.h11
-rw-r--r--deps/v8/src/objects/embedder-data-slot-inl.h6
-rw-r--r--deps/v8/src/objects/feedback-vector-inl.h19
-rw-r--r--deps/v8/src/objects/feedback-vector.cc73
-rw-r--r--deps/v8/src/objects/feedback-vector.h14
-rw-r--r--deps/v8/src/objects/field-index-inl.h12
-rw-r--r--deps/v8/src/objects/field-index.h6
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h78
-rw-r--r--deps/v8/src/objects/fixed-array.h45
-rw-r--r--deps/v8/src/objects/free-space-inl.h36
-rw-r--r--deps/v8/src/objects/free-space.h3
-rw-r--r--deps/v8/src/objects/hash-table-inl.h20
-rw-r--r--deps/v8/src/objects/hash-table.h9
-rw-r--r--deps/v8/src/objects/heap-number-inl.h4
-rw-r--r--deps/v8/src/objects/heap-object-inl.h14
-rw-r--r--deps/v8/src/objects/heap-object.h58
-rw-r--r--deps/v8/src/objects/instance-type.h40
-rw-r--r--deps/v8/src/objects/intl-objects.cc273
-rw-r--r--deps/v8/src/objects/intl-objects.h28
-rw-r--r--deps/v8/src/objects/intl-objects.tq24
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h4
-rw-r--r--deps/v8/src/objects/js-array-buffer.h6
-rw-r--r--deps/v8/src/objects/js-array-inl.h7
-rw-r--r--deps/v8/src/objects/js-array.h6
-rw-r--r--deps/v8/src/objects/js-break-iterator-inl.h12
-rw-r--r--deps/v8/src/objects/js-break-iterator.cc20
-rw-r--r--deps/v8/src/objects/js-break-iterator.h32
-rw-r--r--deps/v8/src/objects/js-collator-inl.h2
-rw-r--r--deps/v8/src/objects/js-collator.cc29
-rw-r--r--deps/v8/src/objects/js-collator.h14
-rw-r--r--deps/v8/src/objects/js-collection-iterator.h1
-rw-r--r--deps/v8/src/objects/js-collection.h4
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc221
-rw-r--r--deps/v8/src/objects/js-date-time-format.h6
-rw-r--r--deps/v8/src/objects/js-list-format-inl.h4
-rw-r--r--deps/v8/src/objects/js-list-format.cc64
-rw-r--r--deps/v8/src/objects/js-list-format.h18
-rw-r--r--deps/v8/src/objects/js-locale.cc13
-rw-r--r--deps/v8/src/objects/js-locale.h9
-rw-r--r--deps/v8/src/objects/js-number-format-inl.h11
-rw-r--r--deps/v8/src/objects/js-number-format.cc326
-rw-r--r--deps/v8/src/objects/js-number-format.h37
-rw-r--r--deps/v8/src/objects/js-objects-inl.h351
-rw-r--r--deps/v8/src/objects/js-objects.cc320
-rw-r--r--deps/v8/src/objects/js-objects.h263
-rw-r--r--deps/v8/src/objects/js-plural-rules-inl.h7
-rw-r--r--deps/v8/src/objects/js-plural-rules.cc229
-rw-r--r--deps/v8/src/objects/js-plural-rules.h20
-rw-r--r--deps/v8/src/objects/js-proxy-inl.h7
-rw-r--r--deps/v8/src/objects/js-proxy.h19
-rw-r--r--deps/v8/src/objects/js-regexp.h57
-rw-r--r--deps/v8/src/objects/js-relative-time-format-inl.h4
-rw-r--r--deps/v8/src/objects/js-relative-time-format.cc39
-rw-r--r--deps/v8/src/objects/js-relative-time-format.h19
-rw-r--r--deps/v8/src/objects/js-segment-iterator-inl.h2
-rw-r--r--deps/v8/src/objects/js-segment-iterator.cc21
-rw-r--r--deps/v8/src/objects/js-segmenter-inl.h2
-rw-r--r--deps/v8/src/objects/js-segmenter.cc31
-rw-r--r--deps/v8/src/objects/js-segmenter.h13
-rw-r--r--deps/v8/src/objects/js-weak-refs-inl.h14
-rw-r--r--deps/v8/src/objects/js-weak-refs.h68
-rw-r--r--deps/v8/src/objects/keys.cc5
-rw-r--r--deps/v8/src/objects/layout-descriptor-inl.h4
-rw-r--r--deps/v8/src/objects/literal-objects-inl.h72
-rw-r--r--deps/v8/src/objects/literal-objects.cc75
-rw-r--r--deps/v8/src/objects/literal-objects.h19
-rw-r--r--deps/v8/src/objects/lookup-inl.h55
-rw-r--r--deps/v8/src/objects/lookup.cc487
-rw-r--r--deps/v8/src/objects/lookup.h10
-rw-r--r--deps/v8/src/objects/map-inl.h156
-rw-r--r--deps/v8/src/objects/map-updater.cc27
-rw-r--r--deps/v8/src/objects/map-updater.h5
-rw-r--r--deps/v8/src/objects/map.cc185
-rw-r--r--deps/v8/src/objects/map.h91
-rw-r--r--deps/v8/src/objects/maybe-object.h4
-rw-r--r--deps/v8/src/objects/module-inl.h97
-rw-r--r--deps/v8/src/objects/module.cc768
-rw-r--r--deps/v8/src/objects/module.h220
-rw-r--r--deps/v8/src/objects/name-inl.h31
-rw-r--r--deps/v8/src/objects/name.h37
-rw-r--r--deps/v8/src/objects/object-list-macros.h28
-rw-r--r--deps/v8/src/objects/object-macros-undef.h7
-rw-r--r--deps/v8/src/objects/object-macros.h185
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h13
-rw-r--r--deps/v8/src/objects/objects-definitions.h35
-rw-r--r--deps/v8/src/objects/objects-inl.h378
-rw-r--r--deps/v8/src/objects/objects.cc252
-rw-r--r--deps/v8/src/objects/objects.h67
-rw-r--r--deps/v8/src/objects/oddball-inl.h6
-rw-r--r--deps/v8/src/objects/ordered-hash-table-inl.h4
-rw-r--r--deps/v8/src/objects/ordered-hash-table.cc6
-rw-r--r--deps/v8/src/objects/ordered-hash-table.h10
-rw-r--r--deps/v8/src/objects/property-array-inl.h35
-rw-r--r--deps/v8/src/objects/property-array.h6
-rw-r--r--deps/v8/src/objects/property-cell.h2
-rw-r--r--deps/v8/src/objects/property.cc3
-rw-r--r--deps/v8/src/objects/prototype-inl.h9
-rw-r--r--deps/v8/src/objects/scope-info.cc71
-rw-r--r--deps/v8/src/objects/scope-info.h23
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h22
-rw-r--r--deps/v8/src/objects/shared-function-info.h31
-rw-r--r--deps/v8/src/objects/slots.h12
-rw-r--r--deps/v8/src/objects/source-text-module.cc661
-rw-r--r--deps/v8/src/objects/source-text-module.h220
-rw-r--r--deps/v8/src/objects/stack-frame-info-inl.h4
-rw-r--r--deps/v8/src/objects/stack-frame-info.cc313
-rw-r--r--deps/v8/src/objects/stack-frame-info.h35
-rw-r--r--deps/v8/src/objects/string-inl.h77
-rw-r--r--deps/v8/src/objects/string.cc46
-rw-r--r--deps/v8/src/objects/string.h91
-rw-r--r--deps/v8/src/objects/synthetic-module.cc108
-rw-r--r--deps/v8/src/objects/synthetic-module.h69
-rw-r--r--deps/v8/src/objects/tagged-field-inl.h162
-rw-r--r--deps/v8/src/objects/tagged-field.h76
-rw-r--r--deps/v8/src/objects/tagged-impl-inl.h44
-rw-r--r--deps/v8/src/objects/tagged-impl.h39
-rw-r--r--deps/v8/src/objects/tagged-value-inl.h31
-rw-r--r--deps/v8/src/objects/tagged-value.h6
-rw-r--r--deps/v8/src/objects/template-objects.cc4
-rw-r--r--deps/v8/src/objects/template-objects.h2
-rw-r--r--deps/v8/src/objects/templates-inl.h8
-rw-r--r--deps/v8/src/objects/templates.h2
-rw-r--r--deps/v8/src/objects/transitions-inl.h7
-rw-r--r--deps/v8/src/objects/transitions.cc2
-rw-r--r--deps/v8/src/objects/transitions.h2
-rw-r--r--deps/v8/src/objects/value-serializer.cc225
-rw-r--r--deps/v8/src/objects/value-serializer.h12
-rw-r--r--deps/v8/src/parsing/OWNERS2
-rw-r--r--deps/v8/src/parsing/expression-scope.h11
-rw-r--r--deps/v8/src/parsing/parse-info.cc2
-rw-r--r--deps/v8/src/parsing/parser-base.h71
-rw-r--r--deps/v8/src/parsing/parser.cc29
-rw-r--r--deps/v8/src/parsing/parser.h30
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.cc29
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.h13
-rw-r--r--deps/v8/src/parsing/preparse-data.cc2
-rw-r--r--deps/v8/src/parsing/preparser.cc8
-rw-r--r--deps/v8/src/parsing/preparser.h81
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc12
-rw-r--r--deps/v8/src/parsing/scanner.cc36
-rw-r--r--deps/v8/src/parsing/scanner.h4
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc16
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc7
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h2
-rw-r--r--deps/v8/src/profiler/tick-sample.cc4
-rw-r--r--deps/v8/src/regexp/OWNERS2
-rw-r--r--deps/v8/src/regexp/jsregexp-inl.h86
-rw-r--r--deps/v8/src/regexp/jsregexp.cc7055
-rw-r--r--deps/v8/src/regexp/jsregexp.h1548
-rw-r--r--deps/v8/src/regexp/regexp-ast.h24
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator-inl.h (renamed from deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h)24
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.cc (renamed from deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc)221
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.h (renamed from deps/v8/src/regexp/regexp-macro-assembler-irregexp.h)39
-rw-r--r--deps/v8/src/regexp/regexp-bytecodes.h (renamed from deps/v8/src/regexp/bytecodes-irregexp.h)11
-rw-r--r--deps/v8/src/regexp/regexp-compiler-tonode.cc1678
-rw-r--r--deps/v8/src/regexp/regexp-compiler.cc3551
-rw-r--r--deps/v8/src/regexp/regexp-compiler.h657
-rw-r--r--deps/v8/src/regexp/regexp-dotprinter.cc244
-rw-r--r--deps/v8/src/regexp/regexp-dotprinter.h23
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.cc (renamed from deps/v8/src/regexp/interpreter-irregexp.cc)269
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.h (renamed from deps/v8/src/regexp/interpreter-irregexp.h)18
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-arch.h30
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc9
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h8
-rw-r--r--deps/v8/src/regexp/regexp-nodes.h675
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc66
-rw-r--r--deps/v8/src/regexp/regexp-parser.h10
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc6
-rw-r--r--deps/v8/src/regexp/regexp.cc1018
-rw-r--r--deps/v8/src/regexp/regexp.h177
-rw-r--r--deps/v8/src/roots/OWNERS11
-rw-r--r--deps/v8/src/roots/roots-inl.h3
-rw-r--r--deps/v8/src/roots/roots.h3
-rw-r--r--deps/v8/src/runtime/OWNERS3
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc40
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc31
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc30
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc1
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc44
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc7
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc349
-rw-r--r--deps/v8/src/runtime/runtime-module.cc8
-rw-r--r--deps/v8/src/runtime/runtime-object.cc82
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc12
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc48
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc18
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc1
-rw-r--r--deps/v8/src/runtime/runtime-test.cc127
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc2
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc164
-rw-r--r--deps/v8/src/runtime/runtime-weak-refs.cc3
-rw-r--r--deps/v8/src/runtime/runtime.h92
-rw-r--r--deps/v8/src/snapshot/OWNERS2
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc14
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.cc56
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.h12
-rw-r--r--deps/v8/src/snapshot/deserializer.cc127
-rw-r--r--deps/v8/src/snapshot/deserializer.h19
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc10
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc7
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc6
-rw-r--r--deps/v8/src/snapshot/natives.h1
-rw-r--r--deps/v8/src/snapshot/partial-deserializer.cc3
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc2
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.cc2
-rw-r--r--deps/v8/src/snapshot/references.h61
-rw-r--r--deps/v8/src/snapshot/serializer-allocator.cc66
-rw-r--r--deps/v8/src/snapshot/serializer-allocator.h8
-rw-r--r--deps/v8/src/snapshot/serializer-common.h30
-rw-r--r--deps/v8/src/snapshot/serializer.cc72
-rw-r--r--deps/v8/src/snapshot/serializer.h10
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h8
-rw-r--r--deps/v8/src/snapshot/snapshot.h6
-rw-r--r--deps/v8/src/strings/OWNERS4
-rw-r--r--deps/v8/src/strings/char-predicates-inl.h12
-rw-r--r--deps/v8/src/strings/char-predicates.h6
-rw-r--r--deps/v8/src/strings/string-builder-inl.h11
-rw-r--r--deps/v8/src/strings/string-builder.cc34
-rw-r--r--deps/v8/src/strings/string-stream.cc5
-rw-r--r--deps/v8/src/tasks/OWNERS6
-rw-r--r--deps/v8/src/third_party/siphash/OWNERS3
-rw-r--r--deps/v8/src/third_party/utf8-decoder/OWNERS2
-rw-r--r--deps/v8/src/third_party/valgrind/OWNERS1
-rw-r--r--deps/v8/src/third_party/vtune/OWNERS1
-rw-r--r--deps/v8/src/torque/ast.h226
-rw-r--r--deps/v8/src/torque/constants.h4
-rw-r--r--deps/v8/src/torque/contextual.h18
-rw-r--r--deps/v8/src/torque/csa-generator.cc31
-rw-r--r--deps/v8/src/torque/declarable.h85
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc23
-rw-r--r--deps/v8/src/torque/declaration-visitor.h12
-rw-r--r--deps/v8/src/torque/declarations.cc12
-rw-r--r--deps/v8/src/torque/declarations.h14
-rw-r--r--deps/v8/src/torque/earley-parser.cc25
-rw-r--r--deps/v8/src/torque/earley-parser.h4
-rw-r--r--deps/v8/src/torque/global-context.cc24
-rw-r--r--deps/v8/src/torque/global-context.h35
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc702
-rw-r--r--deps/v8/src/torque/implementation-visitor.h128
-rw-r--r--deps/v8/src/torque/ls/json-parser.cc2
-rw-r--r--deps/v8/src/torque/ls/message-handler.cc42
-rw-r--r--deps/v8/src/torque/ls/message-handler.h4
-rw-r--r--deps/v8/src/torque/ls/message-pipe.h2
-rw-r--r--deps/v8/src/torque/ls/message.h10
-rw-r--r--deps/v8/src/torque/ls/torque-language-server.cc6
-rw-r--r--deps/v8/src/torque/server-data.h1
-rw-r--r--deps/v8/src/torque/source-positions.cc59
-rw-r--r--deps/v8/src/torque/source-positions.h32
-rw-r--r--deps/v8/src/torque/torque-compiler.cc33
-rw-r--r--deps/v8/src/torque/torque-compiler.h3
-rw-r--r--deps/v8/src/torque/torque-parser.cc280
-rw-r--r--deps/v8/src/torque/torque.cc22
-rw-r--r--deps/v8/src/torque/type-oracle.cc10
-rw-r--r--deps/v8/src/torque/type-oracle.h30
-rw-r--r--deps/v8/src/torque/type-visitor.cc94
-rw-r--r--deps/v8/src/torque/types.cc22
-rw-r--r--deps/v8/src/torque/types.h23
-rw-r--r--deps/v8/src/torque/utils.cc38
-rw-r--r--deps/v8/src/torque/utils.h16
-rw-r--r--deps/v8/src/tracing/OWNERS2
-rw-r--r--deps/v8/src/tracing/trace-event.h4
-rw-r--r--deps/v8/src/trap-handler/OWNERS2
-rw-r--r--deps/v8/src/utils/OWNERS2
-rw-r--r--deps/v8/src/utils/allocation.cc4
-rw-r--r--deps/v8/src/utils/allocation.h11
-rw-r--r--deps/v8/src/utils/splay-tree-inl.h292
-rw-r--r--deps/v8/src/utils/splay-tree.h194
-rw-r--r--deps/v8/src/utils/utils.h63
-rw-r--r--deps/v8/src/utils/vector.h5
-rw-r--r--deps/v8/src/wasm/OWNERS2
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h17
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h11
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h12
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h16
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc251
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h32
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h50
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h40
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h139
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h139
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h12
-rw-r--r--deps/v8/src/wasm/c-api.cc1058
-rw-r--r--deps/v8/src/wasm/decoder.h4
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h510
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc9
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h2
-rw-r--r--deps/v8/src/wasm/function-compiler.cc116
-rw-r--r--deps/v8/src/wasm/function-compiler.h40
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc18
-rw-r--r--deps/v8/src/wasm/js-to-wasm-wrapper-cache.h41
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.cc24
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.h100
-rw-r--r--deps/v8/src/wasm/memory-tracing.cc6
-rw-r--r--deps/v8/src/wasm/module-compiler.cc335
-rw-r--r--deps/v8/src/wasm/module-compiler.h30
-rw-r--r--deps/v8/src/wasm/module-decoder.cc223
-rw-r--r--deps/v8/src/wasm/module-decoder.h4
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc212
-rw-r--r--deps/v8/src/wasm/value-type.h86
-rw-r--r--deps/v8/src/wasm/wasm-arguments.h73
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc103
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h19
-rw-r--r--deps/v8/src/wasm/wasm-constants.h6
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc23
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc74
-rw-r--r--deps/v8/src/wasm/wasm-engine.h7
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc5
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.cc5
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.h4
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc488
-rw-r--r--deps/v8/src/wasm/wasm-js.cc35
-rw-r--r--deps/v8/src/wasm/wasm-memory.cc3
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc314
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h54
-rw-r--r--deps/v8/src/wasm/wasm-module.h10
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h104
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc479
-rw-r--r--deps/v8/src/wasm/wasm-objects.h117
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc42
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h54
-rw-r--r--deps/v8/src/wasm/wasm-result.cc18
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc2
-rw-r--r--deps/v8/src/wasm/wasm-text.cc175
-rw-r--r--deps/v8/src/wasm/wasm-text.h11
-rw-r--r--deps/v8/src/wasm/wasm-value.h50
-rw-r--r--deps/v8/src/zone/OWNERS3
-rw-r--r--deps/v8/src/zone/zone-allocator.h36
-rw-r--r--deps/v8/src/zone/zone-splay-tree.h38
-rw-r--r--deps/v8/src/zone/zone.cc11
-rw-r--r--deps/v8/src/zone/zone.h8
931 files changed, 41169 insertions, 34719 deletions
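One embedder-visible change in the api.cc diff below replaces the old ResourceConstraints constructor with ResourceConstraints::ConfigureDefaultsFromHeapSize(initial_heap_size_in_bytes, maximum_heap_size_in_bytes), which derives young- and old-generation limits from an overall heap budget. A minimal usage sketch, assuming a standard isolate setup; the helper name and the 16 MB / 256 MB budgets are illustrative and not taken from this commit:

#include "include/v8.h"

// Sketch: size an isolate from a single heap budget via the
// ConfigureDefaultsFromHeapSize() API added in deps/v8/src/api/api.cc below.
v8::Isolate* NewIsolateWithHeapBudget() {
  v8::Isolate::CreateParams params;
  // Illustrative budgets: 16 MB initial heap, 256 MB maximum heap.
  params.constraints.ConfigureDefaultsFromHeapSize(16 * 1024 * 1024,
                                                   256 * 1024 * 1024);
  // Caller owns the allocator; kept alive for the isolate's lifetime.
  params.array_buffer_allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  return v8::Isolate::New(params);
}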
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index d24e647b24..1ae6a569e7 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -16,6 +16,7 @@ include_rules = [
"+src/heap/heap-inl.h",
"+src/heap/heap-write-barrier-inl.h",
"+src/heap/heap-write-barrier.h",
+ "+src/heap/read-only-heap-inl.h",
"+src/heap/read-only-heap.h",
"-src/inspector",
"-src/interpreter",
@@ -29,6 +30,10 @@ include_rules = [
"+src/interpreter/interpreter.h",
"+src/interpreter/interpreter-generator.h",
"+src/interpreter/setup-interpreter.h",
+ "-src/regexp",
+ "+src/regexp/regexp.h",
+ "+src/regexp/regexp-stack.h",
+ "+src/regexp/regexp-utils.h",
"-src/trap-handler",
"+src/trap-handler/handler-inside-posix.h",
"+src/trap-handler/handler-inside-win.h",
@@ -44,5 +49,6 @@ specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
"+include/libplatform/v8-tracing.h",
+ "+perfetto/tracing.h"
],
}
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index abad5274c8..c6881f2321 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -1,9 +1,5 @@
-per-file intl.*=cira@chromium.org
-per-file intl.*=mnita@google.com
-per-file intl.*=jshin@chromium.org
-per-file typing-asm.*=aseemgarg@chromium.org
-per-file objects-body-descriptors*=hpayer@chromium.org
-per-file objects-body-descriptors*=mlippautz@chromium.org
-per-file objects-body-descriptors*=ulan@chromium.org
+per-file *DEPS=file://COMMON_OWNERS
+per-file intl-*=file://INTL_OWNERS
+per-file *-intl*=file://INTL_OWNERS
# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS
new file mode 100644
index 0000000000..ce6fb20af8
--- /dev/null
+++ b/deps/v8/src/api/OWNERS
@@ -0,0 +1,11 @@
+file://include/OWNERS
+clemensh@chromium.org
+ishell@chromium.org
+jkummerow@chromium.org
+leszeks@chromium.org
+mlippautz@chromium.org
+mslekova@chromium.org
+mstarzinger@chromium.org
+verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index c22b7c47f9..cd380d3cda 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -5,8 +5,8 @@
#include "src/api/api-natives.h"
#include "src/api/api-inl.h"
+#include "src/common/message-template.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/lookup.h"
@@ -39,7 +39,6 @@ class InvokeScope {
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target,
- bool is_hidden_prototype,
bool is_prototype);
MaybeHandle<JSFunction> InstantiateFunction(
@@ -54,7 +53,7 @@ MaybeHandle<Object> Instantiate(
isolate, Handle<FunctionTemplateInfo>::cast(data), maybe_name);
} else if (data->IsObjectTemplateInfo()) {
return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
- Handle<JSReceiver>(), false, false);
+ Handle<JSReceiver>(), false);
} else {
return data;
}
@@ -129,7 +128,7 @@ void DisableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
// Copy map so it won't interfere constructor's initial map.
Handle<Map> new_map = Map::Copy(isolate, old_map, "DisableAccessChecks");
new_map->set_is_access_check_needed(false);
- JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
+ JSObject::MigrateToMap(isolate, Handle<JSObject>::cast(object), new_map);
}
void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
@@ -138,7 +137,7 @@ void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
Handle<Map> new_map = Map::Copy(isolate, old_map, "EnableAccessChecks");
new_map->set_is_access_check_needed(true);
new_map->set_may_have_interesting_symbols(true);
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
}
class AccessCheckDisableScope {
@@ -178,8 +177,7 @@ Object GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
template <typename TemplateInfoT>
MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
- Handle<TemplateInfoT> data,
- bool is_hidden_prototype) {
+ Handle<TemplateInfoT> data) {
HandleScope scope(isolate);
// Disable access checks while instantiating the object.
AccessCheckDisableScope access_check_scope(isolate, obj);
@@ -246,11 +244,10 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
} else {
auto getter = handle(properties->get(i++), isolate);
auto setter = handle(properties->get(i++), isolate);
- RETURN_ON_EXCEPTION(
- isolate,
- DefineAccessorProperty(isolate, obj, name, getter, setter,
- attributes, is_hidden_prototype),
- JSObject);
+ RETURN_ON_EXCEPTION(isolate,
+ DefineAccessorProperty(isolate, obj, name, getter,
+ setter, attributes, false),
+ JSObject);
}
} else {
// Intrinsic data property --- Get appropriate value from the current
@@ -364,7 +361,6 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo info,
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> info,
Handle<JSReceiver> new_target,
- bool is_hidden_prototype,
bool is_prototype) {
Handle<JSFunction> constructor;
int serial_number = Smi::ToInt(info->serial_number());
@@ -413,8 +409,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
if (is_prototype) JSObject::OptimizeAsPrototype(object);
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- ConfigureInstance(isolate, object, info, is_hidden_prototype), JSObject);
+ isolate, result, ConfigureInstance(isolate, object, info), JSObject);
if (info->immutable_proto()) {
JSObject::SetImmutableProto(object);
}
@@ -486,7 +481,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
InstantiateObject(
isolate,
handle(ObjectTemplateInfo::cast(prototype_templ), isolate),
- Handle<JSReceiver>(), false, true),
+ Handle<JSReceiver>(), true),
JSFunction);
}
Object parent = data->GetParentTemplate();
@@ -514,8 +509,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited,
function);
}
- MaybeHandle<JSObject> result =
- ConfigureInstance(isolate, function, data, false);
+ MaybeHandle<JSObject> result = ConfigureInstance(isolate, function, data);
if (result.is_null()) {
// Uncache on error.
if (serial_number) {
@@ -560,8 +554,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateObject(
Isolate* isolate, Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target) {
InvokeScope invoke_scope(isolate);
- return ::v8::internal::InstantiateObject(isolate, data, new_target, false,
- false);
+ return ::v8::internal::InstantiateObject(isolate, data, new_target, false);
}
MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 90ff932215..e02c74416b 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -237,18 +237,10 @@ namespace v8 {
#define RETURN_ON_FAILED_EXECUTION_PRIMITIVE(T) \
EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, Nothing<T>())
-#define RETURN_TO_LOCAL_UNCHECKED(maybe_local, T) \
- return maybe_local.FromMaybe(Local<T>());
-
#define RETURN_ESCAPED(value) return handle_scope.Escape(value);
namespace {
-Local<Context> ContextFromNeverReadOnlySpaceObject(
- i::Handle<i::JSReceiver> obj) {
- return reinterpret_cast<v8::Isolate*>(obj->GetIsolate())->GetCurrentContext();
-}
-
class InternalEscapableScope : public v8::EscapableHandleScope {
public:
explicit inline InternalEscapableScope(i::Isolate* isolate)
@@ -447,7 +439,7 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location,
heap_stats.end_marker = &end_marker;
if (isolate->heap()->HasBeenSetUp()) {
// BUG(1718): Don't use the take_snapshot since we don't support
- // HeapIterator here without doing a special GC.
+ // HeapObjectIterator here without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
char* first_newline = strchr(last_few_messages, '\n');
if (first_newline == nullptr || first_newline[1] == '\0')
@@ -764,9 +756,9 @@ StartupData SnapshotCreator::CreateBlob(
std::vector<i::Handle<i::SharedFunctionInfo>> sfis_to_clear;
{ // Heap allocation is disallowed within this scope.
- i::HeapIterator heap_iterator(isolate->heap());
- for (i::HeapObject current_obj = heap_iterator.next();
- !current_obj.is_null(); current_obj = heap_iterator.next()) {
+ i::HeapObjectIterator heap_iterator(isolate->heap());
+ for (i::HeapObject current_obj = heap_iterator.Next();
+ !current_obj.is_null(); current_obj = heap_iterator.Next()) {
if (current_obj.IsSharedFunctionInfo()) {
i::SharedFunctionInfo shared =
i::SharedFunctionInfo::cast(current_obj);
@@ -810,17 +802,19 @@ StartupData SnapshotCreator::CreateBlob(
i::SerializedHandleChecker handle_checker(isolate, &contexts);
CHECK(handle_checker.CheckGlobalAndEternalHandles());
- i::HeapIterator heap_iterator(isolate->heap());
- for (i::HeapObject current_obj = heap_iterator.next(); !current_obj.is_null();
- current_obj = heap_iterator.next()) {
+ i::HeapObjectIterator heap_iterator(isolate->heap());
+ for (i::HeapObject current_obj = heap_iterator.Next(); !current_obj.is_null();
+ current_obj = heap_iterator.Next()) {
if (current_obj.IsJSFunction()) {
i::JSFunction fun = i::JSFunction::cast(current_obj);
// Complete in-object slack tracking for all functions.
fun.CompleteInobjectSlackTrackingIfActive();
+ fun.ResetIfBytecodeFlushed();
+
// Also, clear out feedback vectors, or any optimized code.
- if (!fun.raw_feedback_cell().value().IsUndefined()) {
+ if (fun.IsOptimized() || fun.IsInterpreted()) {
fun.raw_feedback_cell().set_value(
i::ReadOnlyRoots(isolate).undefined_value());
fun.set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy));
@@ -963,42 +957,57 @@ Extension::Extension(const char* name, const char* source, int dep_count,
CHECK(source != nullptr || source_length_ == 0);
}
-ResourceConstraints::ResourceConstraints()
- : max_semi_space_size_in_kb_(0),
- max_old_space_size_(0),
- stack_limit_(nullptr),
- code_range_size_(0),
- max_zone_pool_size_(0) {}
+void ResourceConstraints::ConfigureDefaultsFromHeapSize(
+ size_t initial_heap_size_in_bytes, size_t maximum_heap_size_in_bytes) {
+ CHECK_LE(initial_heap_size_in_bytes, maximum_heap_size_in_bytes);
+ if (maximum_heap_size_in_bytes == 0) {
+ return;
+ }
+ size_t young_generation, old_generation;
+ i::Heap::GenerationSizesFromHeapSize(maximum_heap_size_in_bytes,
+ &young_generation, &old_generation);
+ set_max_young_generation_size_in_bytes(
+ i::Max(young_generation, i::Heap::MinYoungGenerationSize()));
+ set_max_old_generation_size_in_bytes(
+ i::Max(old_generation, i::Heap::MinOldGenerationSize()));
+ if (initial_heap_size_in_bytes > 0) {
+ i::Heap::GenerationSizesFromHeapSize(initial_heap_size_in_bytes,
+ &young_generation, &old_generation);
+ // We do not set lower bounds for the initial sizes.
+ set_initial_young_generation_size_in_bytes(young_generation);
+ set_initial_old_generation_size_in_bytes(old_generation);
+ }
+ if (i::kRequiresCodeRange) {
+ set_code_range_size_in_bytes(
+ i::Min(i::kMaximalCodeRangeSize, maximum_heap_size_in_bytes));
+ }
+}
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit) {
- set_max_semi_space_size_in_kb(
- i::Heap::ComputeMaxSemiSpaceSize(physical_memory));
- set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory));
+ size_t heap_size = i::Heap::HeapSizeFromPhysicalMemory(physical_memory);
+ size_t young_generation, old_generation;
+ i::Heap::GenerationSizesFromHeapSize(heap_size, &young_generation,
+ &old_generation);
+ set_max_young_generation_size_in_bytes(young_generation);
+ set_max_old_generation_size_in_bytes(old_generation);
if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
- // Reserve no more than 1/8 of the memory for the code range, but at most
- // kMaximalCodeRangeSize.
- set_code_range_size(
- i::Min(i::kMaximalCodeRangeSize / i::MB,
- static_cast<size_t>((virtual_memory_limit >> 3) / i::MB)));
+ set_code_range_size_in_bytes(
+ i::Min(i::kMaximalCodeRangeSize,
+ static_cast<size_t>(virtual_memory_limit / 8)));
}
}
-void SetResourceConstraints(i::Isolate* isolate,
- const ResourceConstraints& constraints) {
- size_t semi_space_size = constraints.max_semi_space_size_in_kb();
- size_t old_space_size = constraints.max_old_space_size();
- size_t code_range_size = constraints.code_range_size();
- if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) {
- isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
- code_range_size);
- }
+size_t ResourceConstraints::max_semi_space_size_in_kb() const {
+ return i::Heap::SemiSpaceSizeFromYoungGenerationSize(
+ max_young_generation_size_) /
+ i::KB;
+}
- if (constraints.stack_limit() != nullptr) {
- uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
- isolate->stack_guard()->SetStackLimit(limit);
- }
+void ResourceConstraints::set_max_semi_space_size_in_kb(size_t limit_in_kb) {
+ set_max_young_generation_size_in_bytes(
+ i::Heap::YoungGenerationSizeFromSemiSpaceSize(limit_in_kb * i::KB));
}
i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
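The hunk above replaces the old kilobyte-based semi-space/old-space knobs with byte-based young- and old-generation limits and adds ResourceConstraints::ConfigureDefaultsFromHeapSize, which derives both generation sizes from an overall heap budget and caps the code range at kMaximalCodeRangeSize. A minimal embedder sketch of the new entry point, assuming the usual v8::Isolate::CreateParams flow; the 16 MB / 256 MB figures and the function name are illustrative only:

    #include "v8.h"

    v8::Isolate* NewIsolateWithHeapBudget() {
      v8::Isolate::CreateParams params;
      // The allocator is owned by the embedder and must outlive the isolate.
      params.array_buffer_allocator =
          v8::ArrayBuffer::Allocator::NewDefaultAllocator();
      // Derive young/old generation limits from a 16 MB initial, 256 MB maximum heap.
      params.constraints.ConfigureDefaultsFromHeapSize(16 * 1024 * 1024,
                                                       256 * 1024 * 1024);
      return v8::Isolate::New(params);
    }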
@@ -1369,29 +1378,28 @@ static Local<ObjectTemplate> ObjectTemplateNew(
bool do_not_cache);
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
- i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* i_isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::Object> result(Utils::OpenHandle(this)->GetPrototypeTemplate(),
- i_isolate);
+ i::Handle<i::Object> result(self->GetPrototypeTemplate(), i_isolate);
if (result->IsUndefined(i_isolate)) {
// Do not cache prototype objects.
result = Utils::OpenHandle(
*ObjectTemplateNew(i_isolate, Local<FunctionTemplate>(), true));
- i::FunctionTemplateInfo::SetPrototypeTemplate(
- i_isolate, Utils::OpenHandle(this), result);
+ i::FunctionTemplateInfo::SetPrototypeTemplate(i_isolate, self, result);
}
return ToApiHandle<ObjectTemplate>(result);
}
void FunctionTemplate::SetPrototypeProviderTemplate(
Local<FunctionTemplate> prototype_provider) {
- i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* i_isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> result = Utils::OpenHandle(*prototype_provider);
- auto info = Utils::OpenHandle(this);
- CHECK(info->GetPrototypeTemplate().IsUndefined(i_isolate));
- CHECK(info->GetParentTemplate().IsUndefined(i_isolate));
- i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, info,
+ CHECK(self->GetPrototypeTemplate().IsUndefined(i_isolate));
+ CHECK(self->GetParentTemplate().IsUndefined(i_isolate));
+ i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, self,
result);
}
@@ -1420,17 +1428,21 @@ static Local<FunctionTemplate> FunctionTemplateNew(
i::FUNCTION_TEMPLATE_INFO_TYPE, i::AllocationType::kOld);
i::Handle<i::FunctionTemplateInfo> obj =
i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
- InitializeFunctionTemplate(obj);
- obj->set_do_not_cache(do_not_cache);
- int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber;
- if (!do_not_cache) {
- next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
+ {
+ // Disallow GC until all fields of obj have acceptable types.
+ i::DisallowHeapAllocation no_gc;
+ InitializeFunctionTemplate(obj);
+ obj->set_length(length);
+ obj->set_do_not_cache(do_not_cache);
+ int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber;
+ if (!do_not_cache) {
+ next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
+ }
+ obj->set_serial_number(i::Smi::FromInt(next_serial_number));
}
- obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != nullptr) {
Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type);
}
- obj->set_length(length);
obj->set_undetectable(false);
obj->set_needs_access_check(false);
obj->set_accept_any_receiver(true);
@@ -2000,9 +2012,10 @@ bool ObjectTemplate::IsImmutableProto() {
}
void ObjectTemplate::SetImmutableProto() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- Utils::OpenHandle(this)->set_immutable_proto(true);
+ self->set_immutable_proto(true);
}
// --- S c r i p t s ---
@@ -2222,29 +2235,40 @@ Local<Value> Module::GetException() const {
int Module::GetModuleRequestsLength() const {
i::Handle<i::Module> self = Utils::OpenHandle(this);
- return self->info().module_requests().length();
+ if (self->IsSyntheticModule()) return 0;
+ return i::Handle<i::SourceTextModule>::cast(self)
+ ->info()
+ .module_requests()
+ .length();
}
Local<String> Module::GetModuleRequest(int i) const {
CHECK_GE(i, 0);
i::Handle<i::Module> self = Utils::OpenHandle(this);
+ CHECK(self->IsSourceTextModule());
i::Isolate* isolate = self->GetIsolate();
- i::Handle<i::FixedArray> module_requests(self->info().module_requests(),
- isolate);
+ i::Handle<i::FixedArray> module_requests(
+ i::Handle<i::SourceTextModule>::cast(self)->info().module_requests(),
+ isolate);
CHECK_LT(i, module_requests->length());
return ToApiHandle<String>(i::handle(module_requests->get(i), isolate));
}
Location Module::GetModuleRequestLocation(int i) const {
CHECK_GE(i, 0);
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope scope(isolate);
i::Handle<i::Module> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ i::HandleScope scope(isolate);
+ CHECK(self->IsSourceTextModule());
i::Handle<i::FixedArray> module_request_positions(
- self->info().module_request_positions(), isolate);
+ i::Handle<i::SourceTextModule>::cast(self)
+ ->info()
+ .module_request_positions(),
+ isolate);
CHECK_LT(i, module_request_positions->length());
int position = i::Smi::ToInt(module_request_positions->get(i));
- i::Handle<i::Script> script(self->script(), isolate);
+ i::Handle<i::Script> script(
+ i::Handle<i::SourceTextModule>::cast(self)->script(), isolate);
i::Script::PositionInfo info;
i::Script::GetPositionInfo(script, position, &info, i::Script::WITH_OFFSET);
return v8::Location(info.line, info.column);
@@ -2265,8 +2289,10 @@ Local<UnboundModuleScript> Module::GetUnboundModuleScript() {
GetStatus() < kEvaluating, "v8::Module::GetUnboundScript",
"v8::Module::GetUnboundScript must be used on an unevaluated module");
i::Handle<i::Module> self = Utils::OpenHandle(this);
+ CHECK(self->IsSourceTextModule());
return ToApiHandle<UnboundModuleScript>(i::Handle<i::SharedFunctionInfo>(
- self->GetSharedFunctionInfo(), self->GetIsolate()));
+ i::Handle<i::SourceTextModule>::cast(self)->GetSharedFunctionInfo(),
+ self->GetIsolate()));
}
int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); }
@@ -2301,6 +2327,37 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
RETURN_ESCAPED(result);
}
+Local<Module> Module::CreateSyntheticModule(
+ Isolate* isolate, Local<String> module_name,
+ const std::vector<Local<v8::String>>& export_names,
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) {
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_module_name = Utils::OpenHandle(*module_name);
+ i::Handle<i::FixedArray> i_export_names = i_isolate->factory()->NewFixedArray(
+ static_cast<int>(export_names.size()));
+ for (int i = 0; i < i_export_names->length(); ++i) {
+ i::Handle<i::String> str = Utils::OpenHandle(*export_names[i]);
+ i_export_names->set(i, *str);
+ }
+ return v8::Utils::ToLocal(
+ i::Handle<i::Module>(i_isolate->factory()->NewSyntheticModule(
+ i_module_name, i_export_names, evaluation_steps)));
+}
+
+void Module::SetSyntheticModuleExport(Local<String> export_name,
+ Local<v8::Value> export_value) {
+ i::Handle<i::String> i_export_name = Utils::OpenHandle(*export_name);
+ i::Handle<i::Object> i_export_value = Utils::OpenHandle(*export_value);
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ Utils::ApiCheck(self->IsSyntheticModule(),
+ "v8::Module::SetSyntheticModuleExport",
+ "v8::Module::SetSyntheticModuleExport must only be called on "
+ "a SyntheticModule");
+ i::SyntheticModule::SetExport(self->GetIsolate(),
+ i::Handle<i::SyntheticModule>::cast(self),
+ i_export_name, i_export_value);
+}
+
namespace {
i::Compiler::ScriptDetails GetScriptDetails(
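Synthetic modules are the main new embedder-facing surface in this file: CreateSyntheticModule builds a module whose exports are supplied from C++ through SetSyntheticModuleExport, and the GetModuleRequest* accessors above now guard against them (a synthetic module reports zero module requests). A hedged sketch of the intended use; the module and export names and helper functions are illustrative, and the evaluation-steps callback is assumed to take (Local<Context>, Local<Module>) and return MaybeLocal<Value> as declared in this version's v8.h:

    #include <vector>
    #include "v8.h"

    // Evaluation steps: populate the exports declared at creation time.
    v8::MaybeLocal<v8::Value> EvaluateMathModule(v8::Local<v8::Context> context,
                                                 v8::Local<v8::Module> module) {
      v8::Isolate* isolate = context->GetIsolate();
      module->SetSyntheticModuleExport(
          v8::String::NewFromUtf8(isolate, "answer", v8::NewStringType::kNormal)
              .ToLocalChecked(),
          v8::Number::New(isolate, 42));
      return v8::Undefined(isolate);
    }

    v8::Local<v8::Module> MakeMathModule(v8::Isolate* isolate) {
      std::vector<v8::Local<v8::String>> exports = {
          v8::String::NewFromUtf8(isolate, "answer", v8::NewStringType::kNormal)
              .ToLocalChecked()};
      return v8::Module::CreateSyntheticModule(
          isolate,
          v8::String::NewFromUtf8(isolate, "math", v8::NewStringType::kNormal)
              .ToLocalChecked(),
          exports, EvaluateMathModule);
    }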
@@ -2411,7 +2468,7 @@ MaybeLocal<Module> ScriptCompiler::CompileModule(
if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
i::Handle<i::SharedFunctionInfo> shared = Utils::OpenHandle(*unbound);
- return ToApiHandle<Module>(i_isolate->factory()->NewModule(shared));
+ return ToApiHandle<Module>(i_isolate->factory()->NewSourceTextModule(shared));
}
namespace {
@@ -2745,11 +2802,12 @@ void v8::TryCatch::SetCaptureMessage(bool value) { capture_message_ = value; }
// --- M e s s a g e ---
Local<String> Message::Get() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(isolate, obj);
+ i::Handle<i::String> raw_result =
+ i::MessageHandler::GetMessage(isolate, self);
Local<String> result = Utils::ToLocal(raw_result);
return scope.Escape(result);
}
@@ -2760,10 +2818,10 @@ v8::Isolate* Message::GetIsolate() const {
}
ScriptOrigin Message::GetScriptOrigin() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- auto message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Script> script(message->script(), isolate);
+ i::Handle<i::Script> script(self->script(), isolate);
return GetScriptOriginForScript(isolate, script);
}
@@ -2772,11 +2830,11 @@ v8::Local<Value> Message::GetScriptResourceName() const {
}
v8::Local<v8::StackTrace> Message::GetStackTrace() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- auto message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate);
+ i::Handle<i::Object> stackFramesObj(self->stack_frames(), isolate);
if (!stackFramesObj->IsFixedArray()) return v8::Local<v8::StackTrace>();
auto stackTrace = i::Handle<i::FixedArray>::cast(stackFramesObj);
return scope.Escape(Utils::StackTraceToLocal(stackTrace));
@@ -2845,18 +2903,17 @@ Maybe<int> Message::GetEndColumn(Local<Context> context) const {
}
bool Message::IsSharedCrossOrigin() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- return Utils::OpenHandle(this)
- ->script()
- .origin_options()
- .IsSharedCrossOrigin();
+ return self->script().origin_options().IsSharedCrossOrigin();
}
bool Message::IsOpaque() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- return Utils::OpenHandle(this)->script().origin_options().IsOpaque();
+ return self->script().origin_options().IsOpaque();
}
MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
@@ -2903,11 +2960,11 @@ Local<StackTrace> StackTrace::CurrentStackTrace(Isolate* isolate,
// --- S t a c k F r a m e ---
int StackFrame::GetLineNumber() const {
- return i::StackTraceFrame::GetLineNumber(Utils::OpenHandle(this));
+ return i::StackTraceFrame::GetOneBasedLineNumber(Utils::OpenHandle(this));
}
int StackFrame::GetColumn() const {
- return i::StackTraceFrame::GetColumnNumber(Utils::OpenHandle(this));
+ return i::StackTraceFrame::GetOneBasedColumnNumber(Utils::OpenHandle(this));
}
int StackFrame::GetScriptId() const {
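The StackFrame accessors above now route through explicitly named one-based helpers; the values returned to embedders stay 1-based as before. For reference, a small sketch of reading the position of the topmost frame (LineOfTopFrame is an illustrative name):

    #include "v8.h"

    int LineOfTopFrame(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);
      v8::Local<v8::StackTrace> trace =
          v8::StackTrace::CurrentStackTrace(isolate, /*frame_limit=*/1);
      if (trace->GetFrameCount() == 0) return 0;
      v8::Local<v8::StackFrame> frame = trace->GetFrame(isolate, 0);
      return frame->GetLineNumber();  // 1-based, unchanged by the rename
    }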
@@ -2915,30 +2972,31 @@ int StackFrame::GetScriptId() const {
}
Local<String> StackFrame::GetScriptName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::Object> name =
- i::StackTraceFrame::GetFileName(Utils::OpenHandle(this));
+ i::Handle<i::Object> name = i::StackTraceFrame::GetFileName(self);
return name->IsString()
? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
: Local<String>();
}
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::Object> name =
- i::StackTraceFrame::GetScriptNameOrSourceUrl(Utils::OpenHandle(this));
+ i::StackTraceFrame::GetScriptNameOrSourceUrl(self);
return name->IsString()
? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
: Local<String>();
}
Local<String> StackFrame::GetFunctionName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::Object> name =
- i::StackTraceFrame::GetFunctionName(Utils::OpenHandle(this));
+ i::Handle<i::Object> name = i::StackTraceFrame::GetFunctionName(self);
return name->IsString()
? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
: Local<String>();
@@ -3518,8 +3576,7 @@ MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
}
i::Isolate* i::IsolateFromNeverReadOnlySpaceObject(i::Address obj) {
- return i::NeverReadOnlySpaceObject::GetIsolate(
- i::HeapObject::cast(i::Object(obj)));
+ return i::GetIsolateFromWritableObject(i::HeapObject::cast(i::Object(obj)));
}
bool i::ShouldThrowOnError(i::Isolate* isolate) {
@@ -3866,11 +3923,6 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
return Just(true);
}
-bool v8::Object::Set(v8::Local<Value> key, v8::Local<Value> value) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- return Set(context, key, value).FromMaybe(false);
-}
-
Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
v8::Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -3884,11 +3936,6 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
return Just(true);
}
-bool v8::Object::Set(uint32_t index, v8::Local<Value> value) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- return Set(context, index, value).FromMaybe(false);
-}
-
Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
v8::Local<Name> key,
v8::Local<Value> value) {
@@ -4106,11 +4153,6 @@ MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
RETURN_ESCAPED(Utils::ToLocal(result));
}
-Local<Value> v8::Object::Get(v8::Local<Value> key) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Get(context, key), Value);
-}
-
MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
PREPARE_FOR_EXECUTION(context, Object, Get, Value);
auto self = Utils::OpenHandle(this);
@@ -4121,11 +4163,6 @@ MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
RETURN_ESCAPED(Utils::ToLocal(result));
}
-Local<Value> v8::Object::Get(uint32_t index) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Get(context, index), Value);
-}
-
MaybeLocal<Value> v8::Object::GetPrivate(Local<Context> context,
Local<Private> key) {
return Get(context, Local<Value>(reinterpret_cast<Value*>(*key)));
@@ -4171,8 +4208,8 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
}
Local<Value> v8::Object::GetPrototype() {
- auto isolate = Utils::OpenHandle(this)->GetIsolate();
auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
i::PrototypeIterator iter(isolate, self);
return Utils::ToLocal(i::PrototypeIterator::GetCurrent(iter));
}
@@ -4424,10 +4461,10 @@ void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
AccessControl settings) {
// TODO(verwaest): Remove |settings|.
DCHECK_EQ(v8::DEFAULT, settings);
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
- auto self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return;
i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
i::Handle<i::Object> setter_i = v8::Utils::OpenHandle(*setter, true);
@@ -4637,9 +4674,9 @@ Local<v8::Context> v8::Object::CreationContext() {
int v8::Object::GetIdentityHash() {
i::DisallowHeapAllocation no_gc;
- auto isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope scope(isolate);
auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
+ i::HandleScope scope(isolate);
return self->GetOrCreateIdentityHash(isolate).value();
}
@@ -4825,9 +4862,9 @@ Local<Value> Function::GetDebugName() const {
}
Local<Value> Function::GetDisplayName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (!self->IsJSFunction()) {
return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
@@ -5358,20 +5395,15 @@ Local<Value> Symbol::Name() const {
i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
i::Isolate* isolate;
- if (!i::GetIsolateFromWritableObject(*sym, &isolate)) {
- // If the Symbol is in RO_SPACE, then its name must be too. Since RO_SPACE
- // objects are immovable we can use the Handle(Address*) constructor with
- // the address of the name field in the Symbol object without needing an
- // isolate.
-#ifdef V8_COMPRESS_POINTERS
- // Compressed fields can't serve as handle locations.
- // TODO(ishell): get Isolate as a parameter.
- isolate = i::Isolate::Current();
-#else
+ if (!i::GetIsolateFromHeapObject(*sym, &isolate)) {
+ // Symbol is in RO_SPACE, which means that its name is also in RO_SPACE.
+ // Since RO_SPACE objects are immovable we can use the Handle(Address*)
+ // constructor with the address of the name field in the Symbol object
+ // without needing an isolate.
+ DCHECK(!COMPRESS_POINTERS_BOOL);
i::Handle<i::HeapObject> ro_name(reinterpret_cast<i::Address*>(
sym->GetFieldAddress(i::Symbol::kNameOffset)));
return Utils::ToLocal(ro_name);
-#endif
}
i::Handle<i::Object> name(sym->name(), isolate);
@@ -5917,6 +5949,19 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
context->set_error_message_for_code_gen_from_strings(*error_handle);
}
+void Context::SetAbortScriptExecution(
+ Context::AbortScriptExecutionCallback callback) {
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
+ if (callback == nullptr) {
+ context->set_script_execution_callback(
+ i::ReadOnlyRoots(isolate).undefined_value());
+ } else {
+ SET_FIELD_WRAPPED(isolate, context, set_script_execution_callback,
+ callback);
+ }
+}
+
namespace {
i::Address* GetSerializedDataFromFixedArray(i::Isolate* isolate,
i::FixedArray list, size_t index) {
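Context::SetAbortScriptExecution is new here: installing a callback marks the context so that further attempts to run script in it are aborted and the callback is invoked instead, while passing nullptr clears the marker again (as the setter above shows). A sketch of the embedder side, assuming the AbortScriptExecutionCallback shape void(Isolate*, Local<Context>) from this version's v8.h; the function names are illustrative:

    #include "v8.h"

    void AbortExecution(v8::Isolate* isolate, v8::Local<v8::Context> context) {
      // The embedder has decided this context must not run script any more.
      isolate->TerminateExecution();
    }

    void DetachContext(v8::Local<v8::Context> context) {
      // Arm the context, e.g. when its document/frame is being torn down.
      context->SetAbortScriptExecution(AbortExecution);
    }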
@@ -6218,8 +6263,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
// It is safe to call GetIsolateFromWritableHeapObject because
// SupportsExternalization already checked that the object is writable.
- i::Isolate* isolate;
- i::GetIsolateFromWritableObject(obj, &isolate);
+ i::Isolate* isolate = i::GetIsolateFromWritableObject(obj);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
CHECK(resource && resource->data());
@@ -6246,8 +6290,7 @@ bool v8::String::MakeExternal(
// It is safe to call GetIsolateFromWritableHeapObject because
// SupportsExternalization already checked that the object is writable.
- i::Isolate* isolate;
- i::GetIsolateFromWritableObject(obj, &isolate);
+ i::Isolate* isolate = i::GetIsolateFromWritableObject(obj);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
CHECK(resource && resource->data());
@@ -6364,10 +6407,11 @@ Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
double v8::NumberObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- i::Isolate* isolate = jsvalue->GetIsolate();
+ i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
+ i::Handle<i::JSPrimitiveWrapper>::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
LOG_API(isolate, NumberObject, NumberValue);
- return jsvalue->value().Number();
+ return js_primitive_wrapper->value().Number();
}
Local<v8::Value> v8::BigIntObject::New(Isolate* isolate, int64_t value) {
@@ -6382,11 +6426,12 @@ Local<v8::Value> v8::BigIntObject::New(Isolate* isolate, int64_t value) {
Local<v8::BigInt> v8::BigIntObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- i::Isolate* isolate = jsvalue->GetIsolate();
+ i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
+ i::Handle<i::JSPrimitiveWrapper>::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
LOG_API(isolate, BigIntObject, BigIntValue);
- return Utils::ToLocal(
- i::Handle<i::BigInt>(i::BigInt::cast(jsvalue->value()), isolate));
+ return Utils::ToLocal(i::Handle<i::BigInt>(
+ i::BigInt::cast(js_primitive_wrapper->value()), isolate));
}
Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
@@ -6404,10 +6449,11 @@ Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
bool v8::BooleanObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- i::Isolate* isolate = jsvalue->GetIsolate();
+ i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
+ i::Handle<i::JSPrimitiveWrapper>::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
LOG_API(isolate, BooleanObject, BooleanValue);
- return jsvalue->value().IsTrue(isolate);
+ return js_primitive_wrapper->value().IsTrue(isolate);
}
Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
@@ -6423,11 +6469,12 @@ Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
Local<v8::String> v8::StringObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- i::Isolate* isolate = jsvalue->GetIsolate();
+ i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
+ i::Handle<i::JSPrimitiveWrapper>::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
LOG_API(isolate, StringObject, StringValue);
- return Utils::ToLocal(
- i::Handle<i::String>(i::String::cast(jsvalue->value()), isolate));
+ return Utils::ToLocal(i::Handle<i::String>(
+ i::String::cast(js_primitive_wrapper->value()), isolate));
}
Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
@@ -6442,11 +6489,12 @@ Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- i::Isolate* isolate = jsvalue->GetIsolate();
+ i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
+ i::Handle<i::JSPrimitiveWrapper>::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
LOG_API(isolate, SymbolObject, SymbolValue);
- return Utils::ToLocal(
- i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value()), isolate));
+ return Utils::ToLocal(i::Handle<i::Symbol>(
+ i::Symbol::cast(js_primitive_wrapper->value()), isolate));
}
MaybeLocal<v8::Value> v8::Date::New(Local<Context> context, double time) {
@@ -7839,7 +7887,12 @@ void Isolate::Initialize(Isolate* isolate,
i_isolate->set_api_external_references(params.external_references);
i_isolate->set_allow_atomics_wait(params.allow_atomics_wait);
- SetResourceConstraints(i_isolate, params.constraints);
+ i_isolate->heap()->ConfigureHeap(params.constraints);
+ if (params.constraints.stack_limit() != nullptr) {
+ uintptr_t limit =
+ reinterpret_cast<uintptr_t>(params.constraints.stack_limit());
+ i_isolate->stack_guard()->SetStackLimit(limit);
+ }
// TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(isolate);
if (!i::Snapshot::Initialize(i_isolate)) {
@@ -8291,9 +8344,9 @@ void Isolate::LowMemoryNotification() {
i::GarbageCollectionReason::kLowMemoryNotification);
}
{
- i::HeapIterator iterator(isolate->heap());
- for (i::HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ i::HeapObjectIterator iterator(isolate->heap());
+ for (i::HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsAbstractCode()) {
i::AbstractCode::cast(obj).DropStackFrameCache();
}
@@ -8304,9 +8357,14 @@ void Isolate::LowMemoryNotification() {
int Isolate::ContextDisposedNotification(bool dependant_context) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (!dependant_context) {
- // We left the current context, we can abort all WebAssembly compilations on
- // that isolate.
- isolate->wasm_engine()->DeleteCompileJobsOnIsolate(isolate);
+ if (!isolate->context().is_null()) {
+ // We left the current context, we can abort all WebAssembly compilations
+ // of that context.
+ // A handle scope for the native context.
+ i::HandleScope handle_scope(isolate);
+ isolate->wasm_engine()->DeleteCompileJobsOnContext(
+ isolate->native_context());
+ }
}
// TODO(ahaas): move other non-heap activity out of the heap call.
return isolate->heap()->NotifyContextDisposed(dependant_context);
@@ -8408,6 +8466,9 @@ CALLBACK_SETTER(FatalErrorHandler, FatalErrorCallback, exception_behavior)
CALLBACK_SETTER(OOMErrorHandler, OOMErrorCallback, oom_behavior)
CALLBACK_SETTER(AllowCodeGenerationFromStringsCallback,
AllowCodeGenerationFromStringsCallback, allow_code_gen_callback)
+CALLBACK_SETTER(ModifyCodeGenerationFromStringsCallback,
+ ModifyCodeGenerationFromStringsCallback,
+ modify_code_gen_callback)
CALLBACK_SETTER(AllowWasmCodeGenerationCallback,
AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback)
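The new CALLBACK_SETTER line wires up Isolate::SetModifyCodeGenerationFromStringsCallback, which unlike the existing allow/deny hook lets the embedder rewrite or veto the source handed to eval() and Function(). A hedged sketch, assuming the callback shape MaybeLocal<String>(Local<Context>, Local<Value>) declared for this version, where returning an empty MaybeLocal blocks the code generation; ModifyCodeGen and InstallCodeGenHook are illustrative names:

    #include "v8.h"

    v8::MaybeLocal<v8::String> ModifyCodeGen(v8::Local<v8::Context> context,
                                             v8::Local<v8::Value> source) {
      // Allow plain string sources unchanged, reject everything else.
      if (!source->IsString()) return v8::MaybeLocal<v8::String>();
      return source.As<v8::String>();
    }

    void InstallCodeGenHook(v8::Isolate* isolate) {
      isolate->SetModifyCodeGenerationFromStringsCallback(ModifyCodeGen);
    }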
@@ -8839,9 +8900,9 @@ std::vector<int> debug::Script::LineEnds() const {
}
MaybeLocal<String> debug::Script::Name() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Handle<i::Object> value(script->name(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
return Utils::ToLocal(
@@ -8849,9 +8910,9 @@ MaybeLocal<String> debug::Script::Name() const {
}
MaybeLocal<String> debug::Script::SourceURL() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Handle<i::Object> value(script->source_url(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
return Utils::ToLocal(
@@ -8859,9 +8920,9 @@ MaybeLocal<String> debug::Script::SourceURL() const {
}
MaybeLocal<String> debug::Script::SourceMappingURL() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Handle<i::Object> value(script->source_mapping_url(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
return Utils::ToLocal(
@@ -8869,18 +8930,18 @@ MaybeLocal<String> debug::Script::SourceMappingURL() const {
}
Maybe<int> debug::Script::ContextId() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Object value = script->context_data();
if (value.IsSmi()) return Just(i::Smi::ToInt(value));
return Nothing<int>();
}
MaybeLocal<String> debug::Script::Source() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Handle<i::Object> value(script->source(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
return Utils::ToLocal(
@@ -10171,6 +10232,17 @@ void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) {
}
}
+void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) {
+ if (isolate_) {
+ i::LocalEmbedderHeapTracer* const tracer =
+ reinterpret_cast<i::Isolate*>(isolate_)
+ ->heap()
+ ->local_embedder_heap_tracer();
+ DCHECK_NOT_NULL(tracer);
+ tracer->DecreaseAllocatedSize(bytes);
+ }
+}
+
void EmbedderHeapTracer::RegisterEmbedderReference(
const TracedGlobal<v8::Value>& ref) {
if (ref.IsEmpty()) return;
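DecreaseAllocatedSize is the counterpart to the existing IncreaseAllocatedSize: an embedder's heap tracer reports memory it frees so V8 can adjust its GC heuristics in both directions. A fragmentary sketch of how a tracer subclass might forward its bookkeeping; OnCppAllocation and OnCppFree are hypothetical embedder hooks, and the pure-virtual tracing methods of EmbedderHeapTracer are omitted:

    #include "v8.h"

    class MyTracer : public v8::EmbedderHeapTracer {
     public:
      // TracePrologue/AdvanceTracing/etc. omitted for brevity.
      void OnCppAllocation(size_t bytes) { IncreaseAllocatedSize(bytes); }
      void OnCppFree(size_t bytes) { DecreaseAllocatedSize(bytes); }
    };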
@@ -10360,8 +10432,7 @@ void InvokeAccessorGetterCallback(
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback) {
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kInvokeFunctionCallback);
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
Address callback_address = reinterpret_cast<Address>(callback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, callback_address);
@@ -10382,7 +10453,6 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
#undef EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE
#undef RETURN_ON_FAILED_EXECUTION
#undef RETURN_ON_FAILED_EXECUTION_PRIMITIVE
-#undef RETURN_TO_LOCAL_UNCHECKED
#undef RETURN_ESCAPED
#undef SET_FIELD_WRAPPED
#undef NEW_STRING
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index e041a5daf0..6135a7dfc6 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -14,9 +14,9 @@
#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
#include "src/objects/js-proxy.h"
-#include "src/objects/module.h"
#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/source-text-module.h"
#include "src/utils/detachable-vector.h"
#include "src/objects/templates.h"
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index d4103ae0c1..08f39f8d6a 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
ahaas@chromium.org
clemensh@chromium.org
mstarzinger@chromium.org
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 5a38eeef36..7433b6a12c 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -12,9 +12,9 @@
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
#include "src/common/assert-scope.h"
+#include "src/common/message-template.h"
#include "src/execution/execution.h"
#include "src/execution/isolate.h"
-#include "src/execution/message-template.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/logging/counters.h"
@@ -249,9 +249,9 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
return FAILED;
}
module_ = new (compile_zone) wasm::ZoneBuffer(compile_zone);
- parser.module_builder()->WriteTo(*module_);
+ parser.module_builder()->WriteTo(module_);
asm_offsets_ = new (compile_zone) wasm::ZoneBuffer(compile_zone);
- parser.module_builder()->WriteAsmJsOffsetTable(*asm_offsets_);
+ parser.module_builder()->WriteAsmJsOffsetTable(asm_offsets_);
stdlib_uses_ = *parser.stdlib_uses();
size_t compile_zone_size =
@@ -287,7 +287,7 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(
isolate, &thrower,
wasm::ModuleWireBytes(module_->begin(), module_->end()),
Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()),
- uses_bitset)
+ uses_bitset, shared_info->language_mode())
.ToHandleChecked();
DCHECK(!thrower.error());
compile_time_ = compile_timer.Elapsed().InMillisecondsF();
@@ -319,10 +319,10 @@ void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) {
translation_throughput);
}
-UnoptimizedCompilationJob* AsmJs::NewCompilationJob(
+std::unique_ptr<UnoptimizedCompilationJob> AsmJs::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator) {
- return new AsmJsCompilationJob(parse_info, literal, allocator);
+ return base::make_unique<AsmJsCompilationJob>(parse_info, literal, allocator);
}
namespace {
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
index 46dd3f2e34..3e714cba7a 100644
--- a/deps/v8/src/asmjs/asm-js.h
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -23,7 +23,7 @@ class UnoptimizedCompilationJob;
// Interface to compile and instantiate for asm.js modules.
class AsmJs {
public:
- static UnoptimizedCompilationJob* NewCompilationJob(
+ static std::unique_ptr<UnoptimizedCompilationJob> NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 3d290a1fe1..6ac39dc89c 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -253,7 +253,7 @@ void AsmJsParser::DeclareGlobal(VarInfo* info, bool mutable_variable,
const WasmInitExpr& init) {
info->kind = VarKind::kGlobal;
info->type = type;
- info->index = module_builder_->AddGlobal(vtype, false, true, init);
+ info->index = module_builder_->AddGlobal(vtype, true, init);
info->mutable_variable = mutable_variable;
}
@@ -385,7 +385,8 @@ void AsmJsParser::ValidateModule() {
module_builder_->MarkStartFunction(start);
for (auto& global_import : global_imports_) {
uint32_t import_index = module_builder_->AddGlobalImport(
- global_import.import_name, global_import.value_type);
+ global_import.import_name, global_import.value_type,
+ false /* mutability */);
start->EmitWithI32V(kExprGetGlobal, import_index);
start->EmitWithI32V(kExprSetGlobal, VarIndex(global_import.var_info));
}
@@ -754,7 +755,7 @@ void AsmJsParser::ValidateFunction() {
// Record start of the function, used as position for the stack check.
current_function_builder_->SetAsmFunctionStartPosition(scanner_.Position());
- CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
+ CachedVector<AsmType*> params(&cached_asm_type_p_vectors_);
ValidateFunctionParams(&params);
// Check against limit on number of parameters.
@@ -762,7 +763,7 @@ void AsmJsParser::ValidateFunction() {
FAIL("Number of parameters exceeds internal limit");
}
- CachedVector<ValueType> locals(cached_valuetype_vectors_);
+ CachedVector<ValueType> locals(&cached_valuetype_vectors_);
ValidateFunctionLocals(params.size(), &locals);
function_temp_locals_offset_ = static_cast<uint32_t>(
@@ -837,7 +838,7 @@ void AsmJsParser::ValidateFunctionParams(ZoneVector<AsmType*>* params) {
scanner_.EnterLocalScope();
EXPECT_TOKEN('(');
CachedVector<AsmJsScanner::token_t> function_parameters(
- cached_token_t_vectors_);
+ &cached_token_t_vectors_);
while (!failed_ && !Peek(')')) {
if (!scanner_.IsLocal()) {
FAIL("Expected parameter name");
@@ -969,7 +970,8 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
if (negate) {
dvalue = -dvalue;
}
- current_function_builder_->EmitF32Const(dvalue);
+ float fvalue = DoubleToFloat32(dvalue);
+ current_function_builder_->EmitF32Const(fvalue);
current_function_builder_->EmitSetLocal(info->index);
} else if (CheckForUnsigned(&uvalue)) {
if (uvalue > 0x7FFFFFFF) {
@@ -1314,7 +1316,7 @@ void AsmJsParser::SwitchStatement() {
Begin(pending_label_);
pending_label_ = 0;
// TODO(bradnelson): Make less weird.
- CachedVector<int32_t> cases(cached_int_vectors_);
+ CachedVector<int32_t> cases(&cached_int_vectors_);
GatherCases(&cases);
EXPECT_TOKEN('{');
size_t count = cases.size() + 1;
@@ -2108,7 +2110,11 @@ AsmType* AsmJsParser::ValidateCall() {
// need to match the information stored at this point.
base::Optional<TemporaryVariableScope> tmp;
if (Check('[')) {
- RECURSEn(EqualityExpression());
+ AsmType* index = nullptr;
+ RECURSEn(index = EqualityExpression());
+ if (!index->IsA(AsmType::Intish())) {
+ FAILn("Expected intish index");
+ }
EXPECT_TOKENn('&');
uint32_t mask = 0;
if (!CheckForUnsigned(&mask)) {
@@ -2161,8 +2167,8 @@ AsmType* AsmJsParser::ValidateCall() {
}
// Parse argument list and gather types.
- CachedVector<AsmType*> param_types(cached_asm_type_p_vectors_);
- CachedVector<AsmType*> param_specific_types(cached_asm_type_p_vectors_);
+ CachedVector<AsmType*> param_types(&cached_asm_type_p_vectors_);
+ CachedVector<AsmType*> param_specific_types(&cached_asm_type_p_vectors_);
EXPECT_TOKENn('(');
while (!failed_ && !Peek(')')) {
AsmType* t;
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index 8740cdad11..c7bf30c29e 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -154,9 +154,9 @@ class AsmJsParser {
template <typename T>
class CachedVector final : public ZoneVector<T> {
public:
- explicit CachedVector(CachedVectors<T>& cache)
- : ZoneVector<T>(cache.zone()), cache_(&cache) {
- cache.fill(this);
+ explicit CachedVector(CachedVectors<T>* cache)
+ : ZoneVector<T>(cache->zone()), cache_(cache) {
+ cache->fill(this);
}
~CachedVector() { cache_->reuse(this); }
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index e95afc8afa..e6daa80ec9 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
adamk@chromium.org
bmeurer@chromium.org
gsathya@chromium.org
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index a930a374b8..9987eb2844 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -49,8 +49,6 @@ static const char* NameForNativeContextIntrinsicIndex(uint32_t idx) {
return "UnknownIntrinsicIndex";
}
-void AstNode::Print() { Print(Isolate::Current()); }
-
void AstNode::Print(Isolate* isolate) {
AllowHandleDereference allow_deref;
AstPrinter::PrintOut(isolate, this);
@@ -132,6 +130,10 @@ bool Expression::ToBooleanIsFalse() const {
return IsLiteral() && AsLiteral()->ToBooleanIsFalse();
}
+bool Expression::IsPrivateName() const {
+ return IsVariableProxy() && AsVariableProxy()->IsPrivateName();
+}
+
bool Expression::IsValidReferenceExpression() const {
return IsProperty() ||
(IsVariableProxy() && AsVariableProxy()->IsValidReferenceExpression());
@@ -176,7 +178,7 @@ void VariableProxy::BindTo(Variable* var) {
set_var(var);
set_is_resolved();
var->set_is_used();
- if (is_assigned()) var->set_maybe_assigned();
+ if (is_assigned()) var->SetMaybeAssigned();
}
Assignment::Assignment(NodeType node_type, Token::Value op, Expression* target,
@@ -601,8 +603,8 @@ void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) {
boilerplate_value = handle(Smi::kZero, isolate);
}
- kind = GetMoreGeneralElementsKind(kind,
- boilerplate_value->OptimalElementsKind());
+ kind = GetMoreGeneralElementsKind(
+ kind, boilerplate_value->OptimalElementsKind(isolate));
fixed_array->set(array_index, *boilerplate_value);
}
@@ -832,6 +834,9 @@ Call::CallType Call::GetCallType() const {
Property* property = expression()->AsProperty();
if (property != nullptr) {
+ if (property->IsPrivateReference()) {
+ return PRIVATE_CALL;
+ }
bool is_super = property->IsSuperAccess();
if (property->key()->IsPropertyName()) {
return is_super ? NAMED_SUPER_PROPERTY_CALL : NAMED_PROPERTY_CALL;
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 27d298c88e..bd52d1b2c0 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -147,7 +147,6 @@ class AstNode: public ZoneObject {
int position() const { return position_; }
#ifdef DEBUG
- void Print();
void Print(Isolate* isolate);
#endif // DEBUG
@@ -205,6 +204,9 @@ class Expression : public AstNode {
// True iff the expression is a valid reference expression.
bool IsValidReferenceExpression() const;
+ // True iff the expression is a private name.
+ bool IsPrivateName() const;
+
// Helpers for ToBoolean conversion.
bool ToBooleanIsTrue() const;
bool ToBooleanIsFalse() const;
@@ -1421,32 +1423,6 @@ class ObjectLiteral final : public AggregateLiteral {
: public BitField<bool, FastElementsField::kNext, 1> {};
};
-
-// A map from property names to getter/setter pairs allocated in the zone.
-class AccessorTable
- : public base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
- bool (*)(void*, void*),
- ZoneAllocationPolicy> {
- public:
- explicit AccessorTable(Zone* zone)
- : base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
- bool (*)(void*, void*), ZoneAllocationPolicy>(
- Literal::Match, ZoneAllocationPolicy(zone)),
- zone_(zone) {}
-
- Iterator lookup(Literal* literal) {
- Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
- if (it->second == nullptr) {
- it->second = new (zone_) ObjectLiteral::Accessors();
- }
- return it;
- }
-
- private:
- Zone* zone_;
-};
-
-
// An array literal has a literals object that is used
// for minimizing the work when constructing it at runtime.
class ArrayLiteral final : public AggregateLiteral {
@@ -1533,7 +1509,7 @@ class VariableProxy final : public Expression {
void set_is_assigned() {
bit_field_ = IsAssignedField::update(bit_field_, true);
if (is_resolved()) {
- var()->set_maybe_assigned();
+ var()->SetMaybeAssigned();
}
}
@@ -1635,11 +1611,12 @@ class VariableProxy final : public Expression {
// Otherwise, the assignment is to a non-property (a global, a local slot, a
// parameter slot, or a destructuring pattern).
enum AssignType {
- NON_PROPERTY,
- NAMED_PROPERTY,
- KEYED_PROPERTY,
- NAMED_SUPER_PROPERTY,
- KEYED_SUPER_PROPERTY
+ NON_PROPERTY, // destructuring
+ NAMED_PROPERTY, // obj.key
+ KEYED_PROPERTY, // obj[key]
+ NAMED_SUPER_PROPERTY, // super.key
+ KEYED_SUPER_PROPERTY, // super[key]
+ PRIVATE_METHOD // obj.#key: #key is a private method
};
class Property final : public Expression {
@@ -1650,10 +1627,19 @@ class Property final : public Expression {
Expression* key() const { return key_; }
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
+ bool IsPrivateReference() const { return key()->IsPrivateName(); }
// Returns the properties assign type.
static AssignType GetAssignType(Property* property) {
if (property == nullptr) return NON_PROPERTY;
+ if (property->IsPrivateReference()) {
+ DCHECK(!property->IsSuperAccess());
+ VariableProxy* proxy = property->key()->AsVariableProxy();
+ DCHECK_NOT_NULL(proxy);
+ Variable* var = proxy->var();
+ // Use KEYED_PROPERTY for private fields.
+ return var->requires_brand_check() ? PRIVATE_METHOD : KEYED_PROPERTY;
+ }
bool super_access = property->IsSuperAccess();
return (property->key()->IsPropertyName())
? (super_access ? NAMED_SUPER_PROPERTY : NAMED_PROPERTY)
@@ -1715,6 +1701,7 @@ class Call final : public Expression {
KEYED_PROPERTY_CALL,
NAMED_SUPER_PROPERTY_CALL,
KEYED_SUPER_PROPERTY_CALL,
+ PRIVATE_CALL,
SUPER_CALL,
RESOLVED_PROPERTY_CALL,
OTHER_CALL
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 5e9bbc6332..261b72c352 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -12,7 +12,7 @@
namespace v8 {
namespace internal {
-bool ModuleDescriptor::AstRawStringComparer::operator()(
+bool SourceTextModuleDescriptor::AstRawStringComparer::operator()(
const AstRawString* lhs, const AstRawString* rhs) const {
// Fast path for equal pointers: a pointer is not strictly less than itself.
if (lhs == rhs) return false;
@@ -27,12 +27,10 @@ bool ModuleDescriptor::AstRawStringComparer::operator()(
return memcmp(lhs->raw_data(), rhs->raw_data(), lhs->byte_length()) < 0;
}
-void ModuleDescriptor::AddImport(const AstRawString* import_name,
- const AstRawString* local_name,
- const AstRawString* module_request,
- const Scanner::Location loc,
- const Scanner::Location specifier_loc,
- Zone* zone) {
+void SourceTextModuleDescriptor::AddImport(
+ const AstRawString* import_name, const AstRawString* local_name,
+ const AstRawString* module_request, const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->local_name = local_name;
entry->import_name = import_name;
@@ -40,38 +38,34 @@ void ModuleDescriptor::AddImport(const AstRawString* import_name,
AddRegularImport(entry);
}
-void ModuleDescriptor::AddStarImport(const AstRawString* local_name,
- const AstRawString* module_request,
- const Scanner::Location loc,
- const Scanner::Location specifier_loc,
- Zone* zone) {
+void SourceTextModuleDescriptor::AddStarImport(
+ const AstRawString* local_name, const AstRawString* module_request,
+ const Scanner::Location loc, const Scanner::Location specifier_loc,
+ Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->local_name = local_name;
entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddNamespaceImport(entry, zone);
}
-void ModuleDescriptor::AddEmptyImport(const AstRawString* module_request,
- const Scanner::Location specifier_loc) {
+void SourceTextModuleDescriptor::AddEmptyImport(
+ const AstRawString* module_request, const Scanner::Location specifier_loc) {
AddModuleRequest(module_request, specifier_loc);
}
-
-void ModuleDescriptor::AddExport(
- const AstRawString* local_name, const AstRawString* export_name,
- Scanner::Location loc, Zone* zone) {
+void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name,
+ const AstRawString* export_name,
+ Scanner::Location loc, Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->export_name = export_name;
entry->local_name = local_name;
AddRegularExport(entry);
}
-void ModuleDescriptor::AddExport(const AstRawString* import_name,
- const AstRawString* export_name,
- const AstRawString* module_request,
- const Scanner::Location loc,
- const Scanner::Location specifier_loc,
- Zone* zone) {
+void SourceTextModuleDescriptor::AddExport(
+ const AstRawString* import_name, const AstRawString* export_name,
+ const AstRawString* module_request, const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone) {
DCHECK_NOT_NULL(import_name);
DCHECK_NOT_NULL(export_name);
Entry* entry = new (zone) Entry(loc);
@@ -81,10 +75,9 @@ void ModuleDescriptor::AddExport(const AstRawString* import_name,
AddSpecialExport(entry, zone);
}
-void ModuleDescriptor::AddStarExport(const AstRawString* module_request,
- const Scanner::Location loc,
- const Scanner::Location specifier_loc,
- Zone* zone) {
+void SourceTextModuleDescriptor::AddStarExport(
+ const AstRawString* module_request, const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddSpecialExport(entry, zone);
@@ -98,24 +91,25 @@ Handle<Object> ToStringOrUndefined(Isolate* isolate, const AstRawString* s) {
}
} // namespace
-Handle<ModuleInfoEntry> ModuleDescriptor::Entry::Serialize(
+Handle<SourceTextModuleInfoEntry> SourceTextModuleDescriptor::Entry::Serialize(
Isolate* isolate) const {
CHECK(Smi::IsValid(module_request)); // TODO(neis): Check earlier?
- return ModuleInfoEntry::New(
+ return SourceTextModuleInfoEntry::New(
isolate, ToStringOrUndefined(isolate, export_name),
ToStringOrUndefined(isolate, local_name),
ToStringOrUndefined(isolate, import_name), module_request, cell_index,
location.beg_pos, location.end_pos);
}
-Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
- Zone* zone) const {
+Handle<FixedArray> SourceTextModuleDescriptor::SerializeRegularExports(
+ Isolate* isolate, Zone* zone) const {
// We serialize regular exports in a way that lets us later iterate over their
// local names and for each local name immediately access all its export
// names. (Regular exports have neither import name nor module request.)
ZoneVector<Handle<Object>> data(
- ModuleInfo::kRegularExportLength * regular_exports_.size(), zone);
+ SourceTextModuleInfo::kRegularExportLength * regular_exports_.size(),
+ zone);
int index = 0;
for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
@@ -130,12 +124,13 @@ Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
} while (next != regular_exports_.end() && next->first == it->first);
Handle<FixedArray> export_names = isolate->factory()->NewFixedArray(count);
- data[index + ModuleInfo::kRegularExportLocalNameOffset] =
+ data[index + SourceTextModuleInfo::kRegularExportLocalNameOffset] =
it->second->local_name->string();
- data[index + ModuleInfo::kRegularExportCellIndexOffset] =
+ data[index + SourceTextModuleInfo::kRegularExportCellIndexOffset] =
handle(Smi::FromInt(it->second->cell_index), isolate);
- data[index + ModuleInfo::kRegularExportExportNamesOffset] = export_names;
- index += ModuleInfo::kRegularExportLength;
+ data[index + SourceTextModuleInfo::kRegularExportExportNamesOffset] =
+ export_names;
+ index += SourceTextModuleInfo::kRegularExportLength;
// Collect the export names.
int i = 0;
@@ -159,7 +154,7 @@ Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
return result;
}
-void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
+void SourceTextModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
Entry* entry = it->second;
DCHECK_NOT_NULL(entry->local_name);
@@ -191,14 +186,14 @@ void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
}
}
-ModuleDescriptor::CellIndexKind ModuleDescriptor::GetCellIndexKind(
- int cell_index) {
+SourceTextModuleDescriptor::CellIndexKind
+SourceTextModuleDescriptor::GetCellIndexKind(int cell_index) {
if (cell_index > 0) return kExport;
if (cell_index < 0) return kImport;
return kInvalid;
}
-void ModuleDescriptor::AssignCellIndices() {
+void SourceTextModuleDescriptor::AssignCellIndices() {
int export_index = 1;
for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
auto current_key = it->first;
@@ -230,10 +225,11 @@ void ModuleDescriptor::AssignCellIndices() {
namespace {
-const ModuleDescriptor::Entry* BetterDuplicate(
- const ModuleDescriptor::Entry* candidate,
- ZoneMap<const AstRawString*, const ModuleDescriptor::Entry*>& export_names,
- const ModuleDescriptor::Entry* current_duplicate) {
+const SourceTextModuleDescriptor::Entry* BetterDuplicate(
+ const SourceTextModuleDescriptor::Entry* candidate,
+ ZoneMap<const AstRawString*, const SourceTextModuleDescriptor::Entry*>&
+ export_names,
+ const SourceTextModuleDescriptor::Entry* current_duplicate) {
DCHECK_NOT_NULL(candidate->export_name);
DCHECK(candidate->location.IsValid());
auto insert_result =
@@ -249,11 +245,11 @@ const ModuleDescriptor::Entry* BetterDuplicate(
} // namespace
-const ModuleDescriptor::Entry* ModuleDescriptor::FindDuplicateExport(
- Zone* zone) const {
- const ModuleDescriptor::Entry* duplicate = nullptr;
- ZoneMap<const AstRawString*, const ModuleDescriptor::Entry*> export_names(
- zone);
+const SourceTextModuleDescriptor::Entry*
+SourceTextModuleDescriptor::FindDuplicateExport(Zone* zone) const {
+ const SourceTextModuleDescriptor::Entry* duplicate = nullptr;
+ ZoneMap<const AstRawString*, const SourceTextModuleDescriptor::Entry*>
+ export_names(zone);
for (const auto& elem : regular_exports_) {
duplicate = BetterDuplicate(elem.second, export_names, duplicate);
}
@@ -264,9 +260,9 @@ const ModuleDescriptor::Entry* ModuleDescriptor::FindDuplicateExport(
return duplicate;
}
-bool ModuleDescriptor::Validate(ModuleScope* module_scope,
- PendingCompilationErrorHandler* error_handler,
- Zone* zone) {
+bool SourceTextModuleDescriptor::Validate(
+ ModuleScope* module_scope, PendingCompilationErrorHandler* error_handler,
+ Zone* zone) {
DCHECK_EQ(this, module_scope->module());
DCHECK_NOT_NULL(error_handler);
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index c3aa2bd0ad..4921d41932 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -13,13 +13,13 @@ namespace internal {
class AstRawString;
-class ModuleInfo;
-class ModuleInfoEntry;
+class SourceTextModuleInfo;
+class SourceTextModuleInfoEntry;
class PendingCompilationErrorHandler;
-class ModuleDescriptor : public ZoneObject {
+class SourceTextModuleDescriptor : public ZoneObject {
public:
- explicit ModuleDescriptor(Zone* zone)
+ explicit SourceTextModuleDescriptor(Zone* zone)
: module_requests_(zone),
special_exports_(zone),
namespace_imports_(zone),
@@ -84,9 +84,9 @@ class ModuleDescriptor : public ZoneObject {
const AstRawString* import_name;
// The module_request value records the order in which modules are
- // requested. It also functions as an index into the ModuleInfo's array of
- // module specifiers and into the Module's array of requested modules. A
- // negative value means no module request.
+ // requested. It also functions as an index into the SourceTextModuleInfo's
+ // array of module specifiers and into the Module's array of requested
+ // modules. A negative value means no module request.
int module_request;
// Import/export entries that are associated with a MODULE-allocated
@@ -107,7 +107,7 @@ class ModuleDescriptor : public ZoneObject {
module_request(-1),
cell_index(0) {}
- Handle<ModuleInfoEntry> Serialize(Isolate* isolate) const;
+ Handle<SourceTextModuleInfoEntry> Serialize(Isolate* isolate) const;
};
enum CellIndexKind { kInvalid, kExport, kImport };
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index eca091d61f..c0fe3baff3 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -1278,14 +1278,24 @@ void AstPrinter::VisitProperty(Property* node) {
IndentedScope indent(this, buf.begin(), node->position());
Visit(node->obj());
- AssignType property_kind = Property::GetAssignType(node);
- if (property_kind == NAMED_PROPERTY ||
- property_kind == NAMED_SUPER_PROPERTY) {
- PrintLiteralIndented("NAME", node->key()->AsLiteral(), false);
- } else {
- DCHECK(property_kind == KEYED_PROPERTY ||
- property_kind == KEYED_SUPER_PROPERTY);
- PrintIndentedVisit("KEY", node->key());
+ AssignType type = Property::GetAssignType(node);
+ switch (type) {
+ case NAMED_PROPERTY:
+ case NAMED_SUPER_PROPERTY: {
+ PrintLiteralIndented("NAME", node->key()->AsLiteral(), false);
+ break;
+ }
+ case PRIVATE_METHOD: {
+ PrintIndentedVisit("PRIVATE_METHOD", node->key());
+ break;
+ }
+ case KEYED_PROPERTY:
+ case KEYED_SUPER_PROPERTY: {
+ PrintIndentedVisit("KEY", node->key());
+ break;
+ }
+ case NON_PROPERTY:
+ UNREACHABLE();
}
}
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index e45303c64b..237d98ec60 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -9,7 +9,7 @@
#include "src/ast/ast.h"
#include "src/base/optional.h"
#include "src/builtins/accessors.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/module-inl.h"
@@ -40,6 +40,7 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag,
+ RequiresBrandCheckFlag requires_brand_check,
bool* was_added) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
@@ -51,8 +52,9 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
if (*was_added) {
// The variable has not been declared yet -> insert it.
DCHECK_EQ(name, p->key);
- Variable* variable = new (zone) Variable(
- scope, name, mode, kind, initialization_flag, maybe_assigned_flag);
+ Variable* variable =
+ new (zone) Variable(scope, name, mode, kind, initialization_flag,
+ maybe_assigned_flag, requires_brand_check);
p->value = variable;
}
return reinterpret_cast<Variable*>(p->value);
@@ -128,7 +130,7 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope,
AstValueFactory* avfactory)
: DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE, kModule),
module_descriptor_(new (avfactory->zone())
- ModuleDescriptor(avfactory->zone())) {
+ SourceTextModuleDescriptor(avfactory->zone())) {
set_language_mode(LanguageMode::kStrict);
DeclareThis(avfactory);
}
@@ -262,7 +264,6 @@ void Scope::SetDefaults() {
is_debug_evaluate_scope_ = false;
inner_scope_calls_eval_ = false;
- force_context_allocation_ = false;
force_context_allocation_for_parameters_ = false;
is_declaration_scope_ = false;
@@ -506,8 +507,9 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
DCHECK(is_being_lazily_parsed_);
bool was_added;
Variable* var = DeclareVariableName(name, VariableMode::kVar, &was_added);
- if (sloppy_block_function->init() == Token::ASSIGN)
- var->set_maybe_assigned();
+ if (sloppy_block_function->init() == Token::ASSIGN) {
+ var->SetMaybeAssigned();
+ }
}
}
}
@@ -785,11 +787,13 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
+ RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck;
{
location = VariableLocation::CONTEXT;
index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode,
- &init_flag, &maybe_assigned_flag);
+ &init_flag, &maybe_assigned_flag,
+ &requires_brand_check);
found = index >= 0;
}
@@ -814,9 +818,9 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
}
bool was_added;
- Variable* var =
- cache->variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
- init_flag, maybe_assigned_flag, &was_added);
+ Variable* var = cache->variables_.Declare(
+ zone(), this, name, mode, NORMAL_VARIABLE, init_flag, maybe_assigned_flag,
+ requires_brand_check, &was_added);
DCHECK(was_added);
var->AllocateTo(location, index);
return var;
@@ -889,7 +893,7 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
// assigned because they might be accessed by a lazily parsed top-level
// function, which, for efficiency, we preparse without variable tracking.
if (is_script_scope() || is_module_scope()) {
- if (mode != VariableMode::kConst) var->set_maybe_assigned();
+ if (mode != VariableMode::kConst) var->SetMaybeAssigned();
var->set_is_used();
}
@@ -938,7 +942,7 @@ Variable* Scope::DeclareVariable(
DCHECK(*was_added);
}
} else {
- var->set_maybe_assigned();
+ var->SetMaybeAssigned();
if (V8_UNLIKELY(IsLexicalVariableMode(mode) ||
IsLexicalVariableMode(var->mode()))) {
// The name was declared in this scope before; check for conflicting
@@ -1009,7 +1013,7 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
}
// Sloppy block function redefinition.
}
- var->set_maybe_assigned();
+ var->SetMaybeAssigned();
}
var->set_is_used();
return var;
@@ -1040,7 +1044,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
bool was_added;
return cache->variables_.Declare(
zone(), this, name, VariableMode::kDynamicGlobal, kind,
- kCreatedInitialized, kNotAssigned, &was_added);
+ kCreatedInitialized, kNotAssigned, kNoBrandCheck, &was_added);
// TODO(neis): Mark variable as maybe-assigned?
}
@@ -1063,7 +1067,7 @@ Variable* Scope::NewTemporary(const AstRawString* name,
Variable* var = new (zone()) Variable(scope, name, VariableMode::kTemporary,
NORMAL_VARIABLE, kCreatedInitialized);
scope->AddLocal(var);
- if (maybe_assigned == kMaybeAssigned) var->set_maybe_assigned();
+ if (maybe_assigned == kMaybeAssigned) var->SetMaybeAssigned();
return var;
}
@@ -1401,7 +1405,7 @@ void Scope::AnalyzePartially(DeclarationScope* max_outer_scope,
}
} else {
var->set_is_used();
- if (proxy->is_assigned()) var->set_maybe_assigned();
+ if (proxy->is_assigned()) var->SetMaybeAssigned();
}
}
@@ -1592,6 +1596,10 @@ void PrintVar(int indent, Variable* var) {
if (comma) PrintF(", ");
PrintF("hole initialization elided");
}
+ if (var->requires_brand_check()) {
+ if (comma) PrintF(", ");
+ PrintF("requires brand check");
+ }
PrintF("\n");
}
@@ -1766,9 +1774,9 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
// Declare a new non-local.
DCHECK(IsDynamicVariableMode(mode));
bool was_added;
- Variable* var =
- variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
- kCreatedInitialized, kNotAssigned, &was_added);
+ Variable* var = variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
+ kCreatedInitialized, kNotAssigned,
+ kNoBrandCheck, &was_added);
// Allocate it by giving it a dynamic lookup.
var->AllocateTo(VariableLocation::LOOKUP, -1);
return var;
@@ -1879,11 +1887,14 @@ Variable* Scope::LookupWith(VariableProxy* proxy, Scope* scope,
DCHECK(!scope->already_resolved_);
var->set_is_used();
var->ForceContextAllocation();
- if (proxy->is_assigned()) var->set_maybe_assigned();
+ if (proxy->is_assigned()) var->SetMaybeAssigned();
}
if (entry_point != nullptr) entry_point->variables_.Remove(var);
Scope* target = entry_point == nullptr ? scope : entry_point;
- return target->NonLocal(proxy->raw_name(), VariableMode::kDynamic);
+ Variable* dynamic =
+ target->NonLocal(proxy->raw_name(), VariableMode::kDynamic);
+ dynamic->set_local_if_not_shadowed(var);
+ return dynamic;
}
Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope,
@@ -1912,7 +1923,7 @@ Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope,
// script scope are always dynamic.
if (var->IsGlobalObjectProperty()) {
Scope* target = entry_point == nullptr ? scope : entry_point;
- return target->NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal);
+ var = target->NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal);
}
if (var->is_dynamic()) return var;
@@ -2010,7 +2021,7 @@ void Scope::ResolvePreparsedVariable(VariableProxy* proxy, Scope* scope,
var->set_is_used();
if (!var->is_dynamic()) {
var->ForceContextAllocation();
- if (proxy->is_assigned()) var->set_maybe_assigned();
+ if (proxy->is_assigned()) var->SetMaybeAssigned();
return;
}
}
@@ -2054,7 +2065,7 @@ bool Scope::MustAllocate(Variable* var) {
if (!var->raw_name()->IsEmpty() &&
(inner_scope_calls_eval_ || is_catch_scope() || is_script_scope())) {
var->set_is_used();
- if (inner_scope_calls_eval_) var->set_maybe_assigned();
+ if (inner_scope_calls_eval_) var->SetMaybeAssigned();
}
DCHECK(!var->has_forced_context_allocation() || var->is_used());
// Global variables do not need to be allocated.
@@ -2124,7 +2135,7 @@ void DeclarationScope::AllocateParameterLocals() {
DCHECK_EQ(this, var->scope());
if (has_mapped_arguments) {
var->set_is_used();
- var->set_maybe_assigned();
+ var->SetMaybeAssigned();
var->ForceContextAllocation();
}
AllocateParameter(var, i);
@@ -2315,12 +2326,13 @@ int Scope::ContextLocalCount() const {
(is_function_var_in_context ? 1 : 0);
}
-Variable* ClassScope::DeclarePrivateName(const AstRawString* name,
- bool* was_added) {
+Variable* ClassScope::DeclarePrivateName(
+ const AstRawString* name, RequiresBrandCheckFlag requires_brand_check,
+ bool* was_added) {
Variable* result = EnsureRareData()->private_name_map.Declare(
zone(), this, name, VariableMode::kConst, NORMAL_VARIABLE,
InitializationFlag::kNeedsInitialization,
- MaybeAssignedFlag::kMaybeAssigned, was_added);
+ MaybeAssignedFlag::kMaybeAssigned, requires_brand_check, was_added);
if (*was_added) {
locals_.Add(result);
}
@@ -2404,8 +2416,10 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- int index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode,
- &init_flag, &maybe_assigned_flag);
+ RequiresBrandCheckFlag requires_brand_check;
+ int index =
+ ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, &init_flag,
+ &maybe_assigned_flag, &requires_brand_check);
if (index < 0) {
return nullptr;
}
@@ -2417,7 +2431,7 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
// Add the found private name to the map to speed up subsequent
// lookups for the same name.
bool was_added;
- Variable* var = DeclarePrivateName(name, &was_added);
+ Variable* var = DeclarePrivateName(name, requires_brand_check, &was_added);
DCHECK(was_added);
var->AllocateTo(VariableLocation::CONTEXT, index);
return var;
@@ -2454,8 +2468,7 @@ bool ClassScope::ResolvePrivateNames(ParseInfo* info) {
Scanner::Location loc = proxy->location();
info->pending_error_handler()->ReportMessageAt(
loc.beg_pos, loc.end_pos,
- MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name(),
- kSyntaxError);
+ MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name());
return false;
} else {
var->set_is_used();
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 1feaad2a90..932d5c70b9 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -41,7 +41,9 @@ class VariableMap : public ZoneHashMap {
Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag, bool* was_added);
+ MaybeAssignedFlag maybe_assigned_flag,
+ RequiresBrandCheckFlag requires_brand_check,
+ bool* was_added);
V8_EXPORT_PRIVATE Variable* Lookup(const AstRawString* name);
void Remove(Variable* var);
@@ -556,7 +558,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
MaybeAssignedFlag maybe_assigned_flag, bool* was_added) {
Variable* result =
variables_.Declare(zone, this, name, mode, kind, initialization_flag,
- maybe_assigned_flag, was_added);
+ maybe_assigned_flag, kNoBrandCheck, was_added);
if (*was_added) locals_.Add(result);
return result;
}
@@ -712,7 +714,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// True if one of the inner scopes or the scope itself calls eval.
bool inner_scope_calls_eval_ : 1;
- bool force_context_allocation_ : 1;
bool force_context_allocation_for_parameters_ : 1;
// True if it holds 'var' declarations.
@@ -1155,14 +1156,14 @@ class ModuleScope final : public DeclarationScope {
AstValueFactory* avfactory);
// Returns nullptr in a deserialized scope.
- ModuleDescriptor* module() const { return module_descriptor_; }
+ SourceTextModuleDescriptor* module() const { return module_descriptor_; }
// Set MODULE as VariableLocation for all variables that will live in a
// module's export table.
void AllocateModuleVariables();
private:
- ModuleDescriptor* const module_descriptor_;
+ SourceTextModuleDescriptor* const module_descriptor_;
};
class V8_EXPORT_PRIVATE ClassScope : public Scope {
@@ -1174,7 +1175,9 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
// Declare a private name in the private name map and add it to the
// local variables of this scope.
- Variable* DeclarePrivateName(const AstRawString* name, bool* was_added);
+ Variable* DeclarePrivateName(const AstRawString* name,
+ RequiresBrandCheckFlag requires_brand_check,
+ bool* was_added);
void AddUnresolvedPrivateName(VariableProxy* proxy);
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index df40fee754..7805fa20c8 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -21,7 +21,8 @@ class Variable final : public ZoneObject {
public:
Variable(Scope* scope, const AstRawString* name, VariableMode mode,
VariableKind kind, InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned)
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
+ RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck)
: scope_(scope),
name_(name),
local_if_not_shadowed_(nullptr),
@@ -31,6 +32,7 @@ class Variable final : public ZoneObject {
bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) |
InitializationFlagField::encode(initialization_flag) |
VariableModeField::encode(mode) |
+ RequiresBrandCheckField::encode(requires_brand_check) |
IsUsedField::encode(false) |
ForceContextAllocationField::encode(false) |
ForceHoleInitializationField::encode(false) |
@@ -69,8 +71,31 @@ class Variable final : public ZoneObject {
MaybeAssignedFlag maybe_assigned() const {
return MaybeAssignedFlagField::decode(bit_field_);
}
- void set_maybe_assigned() {
- bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned);
+ void SetMaybeAssigned() {
+ // If this variable is dynamically shadowing another variable, then that
+ // variable could also be assigned (in the non-shadowing case).
+ if (has_local_if_not_shadowed()) {
+ // Avoid repeatedly marking the same tree of variables by only recursing
+ // when this variable's maybe_assigned status actually changes.
+ if (!maybe_assigned()) {
+ local_if_not_shadowed()->SetMaybeAssigned();
+ }
+ DCHECK(local_if_not_shadowed()->maybe_assigned());
+ }
+ set_maybe_assigned();
+ }
+
+ RequiresBrandCheckFlag get_requires_brand_check_flag() const {
+ return RequiresBrandCheckField::decode(bit_field_);
+ }
+
+ bool requires_brand_check() const {
+ return get_requires_brand_check_flag() == kRequiresBrandCheck;
+ }
+
+ void set_requires_brand_check() {
+ bit_field_ =
+ RequiresBrandCheckField::update(bit_field_, kRequiresBrandCheck);
}
int initializer_position() { return initializer_position_; }
@@ -143,11 +168,16 @@ class Variable final : public ZoneObject {
}
Variable* local_if_not_shadowed() const {
- DCHECK(mode() == VariableMode::kDynamicLocal &&
- local_if_not_shadowed_ != nullptr);
+ DCHECK((mode() == VariableMode::kDynamicLocal ||
+ mode() == VariableMode::kDynamic) &&
+ has_local_if_not_shadowed());
return local_if_not_shadowed_;
}
+ bool has_local_if_not_shadowed() const {
+ return local_if_not_shadowed_ != nullptr;
+ }
+
void set_local_if_not_shadowed(Variable* local) {
local_if_not_shadowed_ = local;
}
@@ -200,15 +230,19 @@ class Variable final : public ZoneObject {
const AstRawString* name_;
// If this field is set, this variable references the stored locally bound
- // variable, but it might be shadowed by variable bindings introduced by
- // sloppy 'eval' calls between the reference scope (inclusive) and the
- // binding scope (exclusive).
+ // variable, but it might be shadowed by variable bindings introduced by with
+ // blocks or sloppy 'eval' calls between the reference scope (inclusive) and
+ // the binding scope (exclusive).
Variable* local_if_not_shadowed_;
Variable* next_;
int index_;
int initializer_position_;
uint16_t bit_field_;
+ void set_maybe_assigned() {
+ bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned);
+ }
+
class VariableModeField : public BitField16<VariableMode, 0, 3> {};
class VariableKindField
: public BitField16<VariableKind, VariableModeField::kNext, 3> {};
@@ -225,6 +259,9 @@ class Variable final : public ZoneObject {
class MaybeAssignedFlagField
: public BitField16<MaybeAssignedFlag,
ForceHoleInitializationField::kNext, 1> {};
+ class RequiresBrandCheckField
+ : public BitField16<RequiresBrandCheckFlag, MaybeAssignedFlagField::kNext,
+ 1> {};
Variable** next() { return &next_; }
friend List;
friend base::ThreadedListTraits<Variable>;
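
The variables.h change above turns set_maybe_assigned into a private helper and adds a public SetMaybeAssigned that also propagates the flag to the local binding a dynamic variable may shadow (the LookupWith change in scopes.cc now records that local on the dynamic variable it returns). A minimal standalone sketch of the propagation pattern follows; the names are illustrative, not V8's actual classes.

// Sketch: marking a variable maybe-assigned also marks the local it may
// shadow, recursing only when the flag actually flips so a chain of shadowed
// bindings is walked at most once.
#include <cassert>
#include <cstdio>

struct Var {
  bool maybe_assigned = false;
  Var* local_if_not_shadowed = nullptr;  // binding this variable may shadow

  void SetMaybeAssigned() {
    if (local_if_not_shadowed != nullptr) {
      // Only recurse when this variable's state actually changes.
      if (!maybe_assigned) local_if_not_shadowed->SetMaybeAssigned();
      assert(local_if_not_shadowed->maybe_assigned);
    }
    maybe_assigned = true;
  }
};

int main() {
  Var local;    // the statically known binding
  Var dynamic;  // dynamic lookup that may shadow it (with block / sloppy eval)
  dynamic.local_if_not_shadowed = &local;
  dynamic.SetMaybeAssigned();
  std::printf("local maybe_assigned: %d\n", local.maybe_assigned);  // prints 1
  return 0;
}
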
diff --git a/deps/v8/src/base/adapters.h b/deps/v8/src/base/adapters.h
index 92c500085d..f684b52ccb 100644
--- a/deps/v8/src/base/adapters.h
+++ b/deps/v8/src/base/adapters.h
@@ -45,7 +45,7 @@ class ReversedAdapter {
// // iterates through v from back to front
// }
template <typename T>
-ReversedAdapter<T> Reversed(T& t) {
+ReversedAdapter<T> Reversed(T&& t) {
return ReversedAdapter<T>(t);
}
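
The adapters.h change above makes Reversed take a forwarding reference (T&&) instead of an lvalue reference, so the template can also be instantiated for rvalue range expressions; the previous signature only bound to lvalues. A simplified standalone sketch of such an adapter, exercising the common lvalue case only (not V8's actual implementation):

#include <cstdio>
#include <iterator>
#include <vector>

template <typename T>
class ReversedAdapter {
 public:
  explicit ReversedAdapter(T& t) : t_(t) {}
  auto begin() const { return std::rbegin(t_); }
  auto end() const { return std::rend(t_); }

 private:
  T& t_;
};

template <typename T>
ReversedAdapter<T> Reversed(T&& t) {
  // For an lvalue argument, T deduces to a reference type and the stored
  // member reference collapses back to a plain lvalue reference.
  return ReversedAdapter<T>(t);
}

int main() {
  std::vector<int> v = {1, 2, 3};
  for (int x : Reversed(v)) std::printf("%d ", x);  // prints: 3 2 1
  std::printf("\n");
  return 0;
}
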
diff --git a/deps/v8/src/base/lsan.h b/deps/v8/src/base/lsan.h
new file mode 100644
index 0000000000..fd9bbd21c1
--- /dev/null
+++ b/deps/v8/src/base/lsan.h
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// LeakSanitizer support.
+
+#ifndef V8_BASE_LSAN_H_
+#define V8_BASE_LSAN_H_
+
+#include <type_traits>
+
+// There is no compile-time flag for LSan, so enable this whenever ASan is
+// enabled. Note that LSan can be used as part of ASan with 'detect_leaks=1'.
+// On Windows, LSan is not implemented yet, so disable it there.
+#if defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
+
+#include <sanitizer/lsan_interface.h>
+
+#define LSAN_IGNORE_OBJECT(ptr) __lsan_ignore_object(ptr)
+
+#else // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
+
+#define LSAN_IGNORE_OBJECT(ptr) \
+ static_assert(std::is_convertible<decltype(ptr), const void*>::value, \
+ "LSAN_IGNORE_OBJECT can only be used with pointer types")
+
+#endif // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
+
+#endif // V8_BASE_LSAN_H_
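
The new lsan.h keys LeakSanitizer support off the ASan build (LSan has no compile-time flag of its own) and, when LSan is unavailable, LSAN_IGNORE_OBJECT degrades to a static_assert so call sites still type-check their argument. A minimal standalone sketch of that fallback branch, reusing the same static_assert idea outside of V8:

#include <type_traits>

#define LSAN_IGNORE_OBJECT(ptr)                                          \
  static_assert(std::is_convertible<decltype(ptr), const void*>::value,  \
                "LSAN_IGNORE_OBJECT can only be used with pointer types")

int main() {
  int* leaked = new int(42);   // intentionally not freed, for the example
  LSAN_IGNORE_OBJECT(leaked);  // compiles: int* converts to const void*
  // LSAN_IGNORE_OBJECT(42);   // would not compile: int is not a pointer
  return 0;
}
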
diff --git a/deps/v8/src/common/v8memory.h b/deps/v8/src/base/memory.h
index 02ba2de848..087f67291d 100644
--- a/deps/v8/src/common/v8memory.h
+++ b/deps/v8/src/base/memory.h
@@ -2,14 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMMON_V8MEMORY_H_
-#define V8_COMMON_V8MEMORY_H_
+#ifndef V8_BASE_MEMORY_H_
+#define V8_BASE_MEMORY_H_
#include "src/base/macros.h"
-#include "src/common/globals.h"
namespace v8 {
-namespace internal {
+namespace base {
+
+using Address = uintptr_t;
+using byte = uint8_t;
// Memory provides an interface to 'raw' memory. It encapsulates the casts
// that typically are needed when incompatible pointer types are used.
@@ -39,22 +41,6 @@ static inline void WriteUnalignedValue(Address p, V value) {
memcpy(reinterpret_cast<void*>(p), &value, sizeof(V));
}
-static inline uint16_t ReadUnalignedUInt16(Address p) {
- return ReadUnalignedValue<uint16_t>(p);
-}
-
-static inline void WriteUnalignedUInt16(Address p, uint16_t value) {
- WriteUnalignedValue(p, value);
-}
-
-static inline uint32_t ReadUnalignedUInt32(Address p) {
- return ReadUnalignedValue<uint32_t>(p);
-}
-
-static inline void WriteUnalignedUInt32(Address p, uint32_t value) {
- WriteUnalignedValue(p, value);
-}
-
template <typename V>
static inline V ReadLittleEndianValue(Address p) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
@@ -93,7 +79,7 @@ static inline void WriteLittleEndianValue(V* p, V value) {
WriteLittleEndianValue<V>(reinterpret_cast<Address>(p), value);
}
-} // namespace internal
+} // namespace base
} // namespace v8
-#endif // V8_COMMON_V8MEMORY_H_
+#endif // V8_BASE_MEMORY_H_
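
The header moved from src/common/v8memory.h into src/base keeps the templated ReadUnalignedValue / WriteUnalignedValue helpers (the uint16/uint32 wrappers are dropped, so callers use the templates directly) and now defines Address locally as uintptr_t. The core idea is to go through memcpy so an unaligned access never dereferences a misaligned pointer. A standalone sketch of that pattern, assuming the same memcpy-based implementation as the hunk above:

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstring>

using Address = uintptr_t;

template <typename V>
static inline V ReadUnalignedValue(Address p) {
  V r;
  std::memcpy(&r, reinterpret_cast<void*>(p), sizeof(V));
  return r;
}

template <typename V>
static inline void WriteUnalignedValue(Address p, V value) {
  std::memcpy(reinterpret_cast<void*>(p), &value, sizeof(V));
}

int main() {
  unsigned char buf[8] = {0};
  // Write a 32-bit value at an odd (unaligned) offset, then read it back.
  Address at = reinterpret_cast<Address>(buf) + 1;
  WriteUnalignedValue<uint32_t>(at, 0xDEADBEEF);
  std::printf("0x%" PRIX32 "\n", ReadUnalignedValue<uint32_t>(at));  // DEADBEEF
  return 0;
}
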
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS
index 7f64f4dedb..bf5455c9af 100644
--- a/deps/v8/src/base/platform/OWNERS
+++ b/deps/v8/src/base/platform/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
hpayer@chromium.org
mlippautz@chromium.org
ulan@chromium.org
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 11499f572c..fa175c3917 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -48,7 +48,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
size_t request_size = size + (alignment - page_size);
zx_handle_t vmo;
- if (zx_vmo_create(request_size, ZX_VMO_NON_RESIZABLE, &vmo) != ZX_OK) {
+ if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
return nullptr;
}
static const char kVirtualMemoryName[] = "v8-virtualmem";
@@ -152,7 +152,7 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
const auto kMicrosPerSecond = 1000000ULL;
zx_time_t nanos_since_thread_started;
zx_status_t status =
- zx_clock_get_new(ZX_CLOCK_THREAD, &nanos_since_thread_started);
+ zx_clock_get(ZX_CLOCK_THREAD, &nanos_since_thread_started);
if (status != ZX_OK) {
return -1;
}
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 7f4ce192db..6da83d7e02 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -199,6 +199,12 @@ void* OS::GetRandomMmapAddr() {
MutexGuard guard(rng_mutex.Pointer());
GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr));
}
+#if defined(__APPLE__)
+#if V8_TARGET_ARCH_ARM64
+ DCHECK_EQ(1 << 14, AllocatePageSize());
+ raw_addr = RoundDown(raw_addr, 1 << 14);
+#endif
+#endif
#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
// If random hint addresses interfere with address ranges hard coded in
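
The platform-posix.cc hunk above rounds the random mmap hint down to the 16 KB page boundary on Apple arm64 (the DCHECK asserts AllocatePageSize() is 1 << 14 there). Rounding down to a power-of-two alignment is just clearing the low bits; a small standalone sketch with an illustrative RoundDown, not the V8 helper itself:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

constexpr uint64_t RoundDown(uint64_t x, uint64_t alignment) {
  // Assumes alignment is a power of two: clear the low bits.
  return x & ~(alignment - 1);
}

int main() {
  uint64_t raw_addr = 0x123456789ABCull;
  uint64_t hint = RoundDown(raw_addr, uint64_t{1} << 14);
  std::printf("0x%" PRIx64 " -> 0x%" PRIx64 "\n", raw_addr, hint);
  // prints: 0x123456789abc -> 0x123456788000
  return 0;
}
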
diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h
index bb024ca87e..b11dfb86b4 100644
--- a/deps/v8/src/base/small-vector.h
+++ b/deps/v8/src/base/small-vector.h
@@ -88,22 +88,29 @@ class SmallVector {
DCHECK_NE(0, size());
return end_[-1];
}
+ const T& back() const {
+ DCHECK_NE(0, size());
+ return end_[-1];
+ }
T& operator[](size_t index) {
DCHECK_GT(size(), index);
return begin_[index];
}
- const T& operator[](size_t index) const {
+ const T& at(size_t index) const {
DCHECK_GT(size(), index);
return begin_[index];
}
+ const T& operator[](size_t index) const { return at(index); }
+
template <typename... Args>
void emplace_back(Args&&... args) {
- if (V8_UNLIKELY(end_ == end_of_storage_)) Grow();
- new (end_) T(std::forward<Args>(args)...);
- ++end_;
+ T* end = end_;
+ if (V8_UNLIKELY(end == end_of_storage_)) end = Grow();
+ new (end) T(std::forward<Args>(args)...);
+ end_ = end + 1;
}
void pop_back(size_t count = 1) {
@@ -135,7 +142,12 @@ class SmallVector {
typename std::aligned_storage<sizeof(T) * kInlineSize, alignof(T)>::type
inline_storage_;
- void Grow(size_t min_capacity = 0) {
+ // Grows the backing store by a factor of two. Returns the new end of the used
+ // storage (this reduces binary size).
+ V8_NOINLINE T* Grow() { return Grow(0); }
+
+ // Grows the backing store by a factor of two, and at least to {min_capacity}.
+ V8_NOINLINE T* Grow(size_t min_capacity) {
size_t in_use = end_ - begin_;
size_t new_capacity =
base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity()));
@@ -145,6 +157,7 @@ class SmallVector {
begin_ = new_storage;
end_ = new_storage + in_use;
end_of_storage_ = new_storage + new_capacity;
+ return end_;
}
bool is_big() const { return begin_ != inline_storage_begin(); }
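
The small-vector.h changes above add const back() and at() accessors and restructure emplace_back so the hot path keeps the current end in a local and the out-of-line Grow() returns the new end, avoiding a reload of the member after the cold grow call. A standalone mini-buffer sketching that shape (assignment instead of placement new, no inline storage, purely illustrative, not V8's SmallVector):

#include <cstddef>
#include <cstdio>
#include <utility>

template <typename T>
class TinyVector {
 public:
  ~TinyVector() { delete[] begin_; }

  template <typename... Args>
  void emplace_back(Args&&... args) {
    T* end = end_;
    if (end == end_of_storage_) end = Grow();  // cold path, returns new end
    *end = T(std::forward<Args>(args)...);
    end_ = end + 1;
  }

  std::size_t size() const { return static_cast<std::size_t>(end_ - begin_); }
  const T& operator[](std::size_t i) const { return begin_[i]; }

 private:
  // Grows the backing store (at least doubling) and returns the new end of
  // the used storage so the caller does not have to reload end_.
  T* Grow() {
    std::size_t in_use = static_cast<std::size_t>(end_ - begin_);
    std::size_t capacity = static_cast<std::size_t>(end_of_storage_ - begin_);
    std::size_t new_capacity = capacity == 0 ? 4 : 2 * capacity;
    T* new_storage = new T[new_capacity];
    for (std::size_t i = 0; i < in_use; i++) new_storage[i] = begin_[i];
    delete[] begin_;
    begin_ = new_storage;
    end_ = new_storage + in_use;
    end_of_storage_ = new_storage + new_capacity;
    return end_;
  }

  T* begin_ = nullptr;
  T* end_ = nullptr;
  T* end_of_storage_ = nullptr;
};

int main() {
  TinyVector<int> v;
  for (int i = 0; i < 10; i++) v.emplace_back(i * i);
  std::printf("v[9] = %d, size = %zu\n", v[9], v.size());  // v[9] = 81, size = 10
  return 0;
}
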
diff --git a/deps/v8/src/base/vlq-base64.cc b/deps/v8/src/base/vlq-base64.cc
new file mode 100644
index 0000000000..62e63ac872
--- /dev/null
+++ b/deps/v8/src/base/vlq-base64.cc
@@ -0,0 +1,58 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <climits>
+#include <limits>
+
+#include "src/base/logging.h"
+#include "src/base/vlq-base64.h"
+
+namespace v8 {
+namespace base {
+
+namespace {
+constexpr int8_t kCharToDigit[] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 0x3e, -1, -1, -1, 0x3f,
+ 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, -1, -1,
+ -1, -1, -1, -1, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
+ 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12,
+ 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, -1, -1, -1, -1, -1,
+ -1, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24,
+ 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x32, 0x33, -1, -1, -1, -1, -1};
+
+constexpr uint32_t kContinueShift = 5;
+constexpr uint32_t kContinueMask = 1 << kContinueShift;
+constexpr uint32_t kDataMask = kContinueMask - 1;
+
+int8_t charToDigitDecode(uint8_t c) { return c < 128u ? kCharToDigit[c] : -1; }
+} // namespace
+
+int8_t charToDigitDecodeForTesting(uint8_t c) { return charToDigitDecode(c); }
+
+int32_t VLQBase64Decode(const char* start, size_t sz, size_t* pos) {
+ uint32_t res = 0;
+ uint64_t shift = 0;
+ int32_t digit;
+
+ do {
+ if (*pos >= sz) {
+ return std::numeric_limits<int32_t>::min();
+ }
+ digit = static_cast<int>(charToDigitDecode(start[*pos]));
+ bool is_last_byte = (shift + kContinueShift >= 32);
+ if (digit == -1 || (is_last_byte && (digit >> 2) != 0)) {
+ return std::numeric_limits<int32_t>::min();
+ }
+ res += (digit & kDataMask) << shift;
+ shift += kContinueShift;
+ (*pos)++;
+ } while (digit & kContinueMask);
+ return (res & 1) ? -static_cast<int32_t>(res >> 1) : (res >> 1);
+}
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/vlq-base64.h b/deps/v8/src/base/vlq-base64.h
new file mode 100644
index 0000000000..5d8633798b
--- /dev/null
+++ b/deps/v8/src/base/vlq-base64.h
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_VLQ_BASE64_H_
+#define V8_BASE_VLQ_BASE64_H_
+
+#include <string>
+
+#include "src/base/base-export.h"
+
+namespace v8 {
+namespace base {
+V8_BASE_EXPORT int8_t charToDigitDecodeForTesting(uint8_t c);
+
+// Decodes a VLQ-Base64-encoded string into 32-bit values. A valid return value
+// is within [-2^31+1, 2^31-1]. This function returns -2^31
+// (std::numeric_limits<int32_t>::min()) when invalid input is passed.
+V8_BASE_EXPORT int32_t VLQBase64Decode(const char* start, size_t sz,
+ size_t* pos);
+} // namespace base
+} // namespace v8
+#endif // V8_BASE_VLQ_BASE64_H_
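
The new vlq-base64 files implement the VLQ-Base64 number encoding (the scheme used by JavaScript source maps): each base64 character carries five data bits plus a continuation bit (0x20), the groups are assembled least-significant first, and bit 0 of the assembled value is the sign. A simplified standalone decoder as a worked example of the scheme; unlike VLQBase64Decode it assumes valid input and does no bounds or overflow checks:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int CharToDigit(char c) {
  if (c >= 'A' && c <= 'Z') return c - 'A';
  if (c >= 'a' && c <= 'z') return c - 'a' + 26;
  if (c >= '0' && c <= '9') return c - '0' + 52;
  if (c == '+') return 62;
  if (c == '/') return 63;
  return -1;
}

int32_t DecodeOne(const char* s, std::size_t* pos) {
  uint32_t res = 0;
  uint32_t shift = 0;
  int digit;
  do {
    digit = CharToDigit(s[(*pos)++]);
    res += static_cast<uint32_t>(digit & 0x1F) << shift;  // five data bits
    shift += 5;
  } while (digit & 0x20);  // continuation bit set: more groups follow
  // Bit 0 is the sign, the remaining bits are the magnitude.
  return (res & 1) ? -static_cast<int32_t>(res >> 1)
                   : static_cast<int32_t>(res >> 1);
}

int main() {
  const char* input = "EDuB";
  std::size_t pos = 0;
  int32_t a = DecodeOne(input, &pos);  // "E"  = 4      ->  2
  int32_t b = DecodeOne(input, &pos);  // "D"  = 3      -> -1
  int32_t c = DecodeOne(input, &pos);  // "uB" = 46, 1  -> 23
  std::printf("%d %d %d\n", a, b, c);  // prints: 2 -1 23
  return 0;
}
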
diff --git a/deps/v8/src/builtins/OWNERS b/deps/v8/src/builtins/OWNERS
new file mode 100644
index 0000000000..450423f878
--- /dev/null
+++ b/deps/v8/src/builtins/OWNERS
@@ -0,0 +1,3 @@
+file://COMMON_OWNERS
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 25d37d73b4..ea6308622d 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -287,7 +287,8 @@ void Accessors::StringLengthGetter(
if (!value.IsString()) {
// Not a string value. That means that we either got a String wrapper or
// a Value with a String wrapper in its prototype chain.
- value = JSValue::cast(*Utils::OpenHandle(*info.Holder())).value();
+ value =
+ JSPrimitiveWrapper::cast(*Utils::OpenHandle(*info.Holder())).value();
}
Object result = Smi::FromInt(String::cast(value).length());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
@@ -305,7 +306,7 @@ Handle<AccessorInfo> Accessors::MakeStringLengthInfo(Isolate* isolate) {
static Handle<Object> GetFunctionPrototype(Isolate* isolate,
Handle<JSFunction> function) {
if (!function->has_prototype()) {
- Handle<Object> proto = isolate->factory()->NewFunctionPrototype(function);
+ Handle<JSObject> proto = isolate->factory()->NewFunctionPrototype(function);
JSFunction::SetPrototype(function, proto);
}
return Handle<Object>(function->prototype(), isolate);
diff --git a/deps/v8/src/builtins/arguments.tq b/deps/v8/src/builtins/arguments.tq
index add66917c0..6df5f801a3 100644
--- a/deps/v8/src/builtins/arguments.tq
+++ b/deps/v8/src/builtins/arguments.tq
@@ -34,13 +34,13 @@ namespace arguments {
@export
macro GetArgumentsFrameAndCount(implicit context: Context)(f: JSFunction):
ArgumentsInfo {
- let frame: Frame = LoadParentFramePointer();
+ const frame: Frame = LoadParentFramePointer();
assert(frame.function == f);
const shared: SharedFunctionInfo = f.shared_function_info;
const formalParameterCount: bint =
Convert<bint>(Convert<int32>(shared.formal_parameter_count));
- let argumentCount: bint = formalParameterCount;
+ const argumentCount: bint = formalParameterCount;
const adaptor: ArgumentsAdaptorFrame =
Cast<ArgumentsAdaptorFrame>(frame.caller)
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 54c16932fa..9b9956b0fb 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -1093,11 +1093,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
  // 16-bit. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r9, Operand(0));
__ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset));
+ BytecodeArray::kOsrNestingLevelOffset));
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
@@ -1509,13 +1509,16 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ ldr(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ Pop(scratch);
+ Register builtin = temps.Acquire();
+ __ Pop(builtin);
__ add(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(lr);
- __ add(pc, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadEntryFromBuiltinIndex(builtin);
+ __ bx(builtin);
}
} // namespace
@@ -2577,7 +2580,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ tst(sp, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
- __ stop("Unexpected alignment");
+ __ stop();
__ bind(&alignment_as_expected);
}
}
@@ -2606,7 +2609,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ CompareRoot(r3, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ b(eq, &okay);
- __ stop("Unexpected pending exception");
+ __ stop();
__ bind(&okay);
}
@@ -2835,19 +2838,25 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK(function_address == r1 || function_address == r2);
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ Move(r9, ExternalReference::is_profiling_address(isolate));
__ ldrb(r9, MemOperand(r9, 0));
__ cmp(r9, Operand(0));
- __ b(eq, &profiler_disabled);
-
- // Additional parameter is the address of the actual callback.
- __ Move(r3, thunk_ref);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- __ Move(r3, function_address);
+ __ b(ne, &profiler_enabled);
+ __ Move(r9, ExternalReference::address_of_runtime_stats_flag());
+ __ ldr(r9, MemOperand(r9, 0));
+ __ cmp(r9, Operand(0));
+ __ b(ne, &profiler_enabled);
+ {
+ // Call the api function directly.
+ __ Move(r3, function_address);
+ __ b(&end_profiler_check);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ Move(r3, thunk_ref);
+ }
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index f81a1955ee..bcee8f0b5d 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -1201,10 +1201,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
  // 16-bit. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset));
+ BytecodeArray::kOsrNestingLevelOffset));
// Load the initial bytecode offset.
__ Mov(kInterpreterBytecodeOffsetRegister,
@@ -1683,18 +1683,20 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister);
- // Load builtin object.
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
UseScratchRegisterScope temps(masm);
Register builtin = temps.AcquireX();
- __ Ldr(builtin,
- MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset));
+ __ Ldr(
+ builtin,
+ MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinIndexOffset));
// Restore fp, lr.
__ Mov(sp, fp);
__ Pop(fp, lr);
- // Call builtin.
- __ JumpCodeObject(builtin);
+ __ LoadEntryFromBuiltinIndex(builtin);
+ __ Jump(builtin);
}
} // namespace
@@ -3400,16 +3402,23 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK(function_address.is(x1) || function_address.is(x2));
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ Mov(x10, ExternalReference::is_profiling_address(isolate));
__ Ldrb(w10, MemOperand(x10));
- __ Cbz(w10, &profiler_disabled);
- __ Mov(x3, thunk_ref);
- __ B(&end_profiler_check);
-
- __ Bind(&profiler_disabled);
- __ Mov(x3, function_address);
+ __ Cbnz(w10, &profiler_enabled);
+ __ Mov(x10, ExternalReference::address_of_runtime_stats_flag());
+ __ Ldrsw(w10, MemOperand(x10));
+ __ Cbnz(w10, &profiler_enabled);
+ {
+ // Call the api function directly.
+ __ Mov(x3, function_address);
+ __ B(&end_profiler_check);
+ }
+ __ Bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ Mov(x3, thunk_ref);
+ }
__ Bind(&end_profiler_check);
// Save the callee-save registers we are going to use.
diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq
index bfc95a28bf..94d871e8f7 100644
--- a/deps/v8/src/builtins/array-copywithin.tq
+++ b/deps/v8/src/builtins/array-copywithin.tq
@@ -9,7 +9,7 @@ namespace array_copywithin {
// https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin
transitioning javascript builtin ArrayPrototypeCopyWithin(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// 1. Let O be ? ToObject(this value).
const object: JSReceiver = ToObject_Inline(context, receiver);
diff --git a/deps/v8/src/builtins/array-every.tq b/deps/v8/src/builtins/array-every.tq
index 245b07556c..3451cd769b 100644
--- a/deps/v8/src/builtins/array-every.tq
+++ b/deps/v8/src/builtins/array-every.tq
@@ -4,8 +4,9 @@
namespace array {
transitioning javascript builtin
- ArrayEveryLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ ArrayEveryLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object,
length: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -25,9 +26,10 @@ namespace array {
}
transitioning javascript builtin
- ArrayEveryLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, result: Object): Object {
+ ArrayEveryLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object, length: Object,
+ result: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -51,9 +53,9 @@ namespace array {
}
transitioning builtin ArrayEveryLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
- array: Object, o: JSReceiver, initialK: Number, length: Number,
- initialTo: Object): Object {
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _array: Object, o: JSReceiver, initialK: Number, length: Number,
+ _initialTo: Object): Object {
// 5. Let k be 0.
// 6. Repeat, while k < len
for (let k: Number = initialK; k < length; k++) {
@@ -88,7 +90,7 @@ namespace array {
labels Bailout(Smi) {
let k: Smi = 0;
const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- let fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ const fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k);
let fastOW = NewFastJSArrayWitness(fastO);
// Build a fast loop over the smi array.
@@ -109,12 +111,10 @@ namespace array {
// https://tc39.github.io/ecma262/#sec-array.prototype.every
transitioning javascript builtin
- ArrayEvery(implicit context: Context)(receiver: Object, ...arguments):
+ ArrayEvery(js-implicit context: Context, receiver: Object)(...arguments):
Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.every');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -144,8 +144,5 @@ namespace array {
label TypeError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.every');
- }
}
}
diff --git a/deps/v8/src/builtins/array-filter.tq b/deps/v8/src/builtins/array-filter.tq
index 4bf175a787..9acd0d04ee 100644
--- a/deps/v8/src/builtins/array-filter.tq
+++ b/deps/v8/src/builtins/array-filter.tq
@@ -4,9 +4,10 @@
namespace array_filter {
transitioning javascript builtin
- ArrayFilterLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, array: Object,
- initialK: Object, length: Object, initialTo: Object): Object {
+ ArrayFilterLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, array: Object, initialK: Object,
+ length: Object, initialTo: Object): Object {
// All continuation points in the optimized filter implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -27,9 +28,10 @@ namespace array_filter {
}
transitioning javascript builtin
- ArrayFilterLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, array: Object,
- initialK: Object, length: Object, valueK: Object, initialTo: Object,
+ ArrayFilterLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, array: Object, initialK: Object,
+ length: Object, valueK: Object, initialTo: Object,
result: Object): Object {
// All continuation points in the optimized filter implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -42,9 +44,9 @@ namespace array_filter {
const numberLength = Cast<Number>(length) otherwise unreachable;
// This custom lazy deopt point is right after the callback. filter() needs
- // to pick up at the next step, which is setting the callback result in
- // the output array. After incrementing k and to, we can glide into the loop
- // continuation builtin.
+ // to pick up at the next step, which is setting the callback
+ // result in the output array. After incrementing k and to, we can glide
+ // into the loop continuation builtin.
if (ToBoolean(result)) {
FastCreateDataProperty(outputArray, numberTo, valueK);
numberTo = numberTo + 1;
@@ -58,7 +60,7 @@ namespace array_filter {
}
transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
array: JSReceiver, o: JSReceiver, initialK: Number, length: Number,
initialTo: Number): Object {
let to: Number = initialTo;
@@ -145,12 +147,10 @@ namespace array_filter {
// https://tc39.github.io/ecma262/#sec-array.prototype.filter
transitioning javascript builtin
- ArrayFilter(implicit context: Context)(receiver: Object, ...arguments):
+ ArrayFilter(js-implicit context: Context, receiver: Object)(...arguments):
Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.filter');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -199,8 +199,5 @@ namespace array_filter {
label TypeError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.filter');
- }
}
}
diff --git a/deps/v8/src/builtins/array-find.tq b/deps/v8/src/builtins/array-find.tq
index 28223e4c49..ef54dd4666 100644
--- a/deps/v8/src/builtins/array-find.tq
+++ b/deps/v8/src/builtins/array-find.tq
@@ -4,8 +4,9 @@
namespace array_find {
transitioning javascript builtin
- ArrayFindLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ ArrayFindLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object,
length: Object): Object {
// All continuation points in the optimized find implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -24,9 +25,10 @@ namespace array_find {
}
transitioning javascript builtin
- ArrayFindLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, result: Object): Object {
+ ArrayFindLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ _callback: Object, _thisArg: Object, _initialK: Object, _length: Object,
+ _result: Object): Object {
// This deopt continuation point is never actually called, it just
// exists to make stack traces correct from a ThrowTypeError if the
// callback was found to be non-callable.
@@ -37,15 +39,16 @@ namespace array_find {
  // happens right after the callback and its return value must be handled
// before iteration continues.
transitioning javascript builtin
- ArrayFindLoopAfterCallbackLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, foundValue: Object, isFound: Object): Object {
+ ArrayFindLoopAfterCallbackLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object, length: Object,
+ foundValue: Object, isFound: Object): Object {
// All continuation points in the optimized find implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
const numberLength = Cast<Number>(length) otherwise unreachable;
// This custom lazy deopt point is right after the callback. find() needs
@@ -62,7 +65,7 @@ namespace array_find {
}
transitioning builtin ArrayFindLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
o: JSReceiver, initialK: Number, length: Number): Object {
// 5. Let k be 0.
// 6. Repeat, while k < len
@@ -116,12 +119,10 @@ namespace array_find {
// https://tc39.github.io/ecma262/#sec-array.prototype.find
transitioning javascript builtin
- ArrayPrototypeFind(implicit context: Context)(receiver: Object, ...arguments):
- Object {
+ ArrayPrototypeFind(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.find');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -151,8 +152,5 @@ namespace array_find {
label NotCallableError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.find');
- }
}
}
diff --git a/deps/v8/src/builtins/array-findindex.tq b/deps/v8/src/builtins/array-findindex.tq
index 00d8378dfa..5a8bb85fba 100644
--- a/deps/v8/src/builtins/array-findindex.tq
+++ b/deps/v8/src/builtins/array-findindex.tq
@@ -4,8 +4,9 @@
namespace array_findindex {
transitioning javascript builtin
- ArrayFindIndexLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ ArrayFindIndexLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object,
length: Object): Object {
// All continuation points in the optimized findIndex implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -24,9 +25,10 @@ namespace array_findindex {
}
transitioning javascript builtin
- ArrayFindIndexLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, result: Object): Object {
+ ArrayFindIndexLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ _callback: Object, _thisArg: Object, _initialK: Object, _length: Object,
+ _result: Object): Object {
// This deopt continuation point is never actually called, it just
// exists to make stack traces correct from a ThrowTypeError if the
// callback was found to be non-callable.
@@ -37,16 +39,16 @@ namespace array_findindex {
  // happens right after the callback and its return value must be handled
// before iteration continues.
transitioning javascript builtin
- ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation(implicit context:
- Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, foundValue: Object, isFound: Object): Object {
+ ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object, length: Object,
+ foundValue: Object, isFound: Object): Object {
// All continuation points in the optimized findIndex implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
const numberLength = Cast<Number>(length) otherwise unreachable;
// This custom lazy deopt point is right after the callback. find() needs
@@ -64,7 +66,7 @@ namespace array_findindex {
transitioning builtin ArrayFindIndexLoopContinuation(implicit context:
Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
o: JSReceiver, initialK: Number, length: Number): Number {
// 5. Let k be 0.
// 6. Repeat, while k < len
@@ -118,12 +120,10 @@ namespace array_findindex {
// https://tc39.github.io/ecma262/#sec-array.prototype.findIndex
transitioning javascript builtin
- ArrayPrototypeFindIndex(implicit context:
- Context)(receiver: Object, ...arguments): Object {
+ ArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.findIndex');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -154,8 +154,5 @@ namespace array_findindex {
label NotCallableError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.findIndex');
- }
}
}
diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq
index d362e95950..f52d944291 100644
--- a/deps/v8/src/builtins/array-foreach.tq
+++ b/deps/v8/src/builtins/array-foreach.tq
@@ -4,8 +4,9 @@
namespace array_foreach {
transitioning javascript builtin
- ArrayForEachLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ ArrayForEachLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object,
length: Object): Object {
    // All continuation points in the optimized forEach implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -21,9 +22,10 @@ namespace array_foreach {
}
transitioning javascript builtin
- ArrayForEachLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, result: Object): Object {
+ ArrayForEachLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object, length: Object,
+ _result: Object): Object {
    // All continuation points in the optimized forEach implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -38,9 +40,9 @@ namespace array_foreach {
}
transitioning builtin ArrayForEachLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
- array: Object, o: JSReceiver, initialK: Number, len: Number,
- to: Object): Object {
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _array: Object, o: JSReceiver, initialK: Number, len: Number,
+ _to: Object): Object {
// variables {array} and {to} are ignored.
// 5. Let k be 0.
@@ -72,7 +74,7 @@ namespace array_foreach {
labels Bailout(Smi) {
let k: Smi = 0;
const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- let fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
let fastOW = NewFastJSArrayWitness(fastO);
// Build a fast loop over the smi array.
@@ -90,11 +92,10 @@ namespace array_foreach {
// https://tc39.github.io/ecma262/#sec-array.prototype.foreach
transitioning javascript builtin
- ArrayForEach(context: Context, receiver: Object, ...arguments): Object {
+ ArrayForEach(js-implicit context: Context, receiver: Object)(...arguments):
+ Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.forEach');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -127,8 +128,5 @@ namespace array_foreach {
label TypeError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.forEach');
- }
}
}
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index 72e1a3661e..c04233b222 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -37,7 +37,7 @@ namespace array_join {
const array: JSArray = UnsafeCast<JSArray>(receiver);
const fixedArray: FixedArray = UnsafeCast<FixedArray>(array.elements);
const element: Object = fixedArray.objects[UnsafeCast<Smi>(k)];
- return element == Hole ? kEmptyString : element;
+ return element == TheHole ? kEmptyString : element;
}
LoadJoinElement<array::FastDoubleElements>(
@@ -56,7 +56,7 @@ namespace array_join {
assert(!IsDetachedBuffer(typedArray.buffer));
return typed_array::LoadFixedTypedArrayElementAsTagged(
typedArray.data_ptr, UnsafeCast<Smi>(k),
- typed_array::KindForArrayType<T>(), SMI_PARAMETERS);
+ typed_array::KindForArrayType<T>());
}
transitioning builtin ConvertToLocaleString(
@@ -103,8 +103,8 @@ namespace array_join {
}
CannotUseSameArrayAccessor<JSTypedArray>(implicit context: Context)(
- loadFn: LoadJoinElementFn, receiver: JSReceiver, initialMap: Map,
- initialLen: Number): never
+ _loadFn: LoadJoinElementFn, receiver: JSReceiver, _initialMap: Map,
+ _initialLen: Number): never
labels Cannot, Can {
const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
if (IsDetachedBuffer(typedArray.buffer)) goto Cannot;
@@ -246,7 +246,7 @@ namespace array_join {
case (nofSeparators: Number): {
return StringRepeat(context, sep, nofSeparators);
}
- case (obj: Object): {
+ case (Object): {
unreachable;
}
}
@@ -448,7 +448,7 @@ namespace array_join {
const previouslyVisited: Object = stack.objects[i];
// Add `receiver` to the first open slot
- if (previouslyVisited == Hole) {
+ if (previouslyVisited == TheHole) {
stack.objects[i] = receiver;
return True;
}
@@ -473,7 +473,7 @@ namespace array_join {
try {
const stack: FixedArray = LoadJoinStack()
otherwise IfUninitialized;
- if (stack.objects[0] == Hole) {
+ if (stack.objects[0] == TheHole) {
stack.objects[0] = receiver;
} else if (JoinStackPush(stack, receiver) == False)
deferred {
@@ -504,7 +504,7 @@ namespace array_join {
SetJoinStack(newStack);
}
else {
- stack.objects[i] = Hole;
+ stack.objects[i] = TheHole;
}
return Undefined;
}
@@ -521,7 +521,7 @@ namespace array_join {
// Builtin call was not nested (receiver is the first entry) and
// did not contain other nested arrays that expanded the stack.
if (stack.objects[0] == receiver && len == kMinJoinStackSize) {
- StoreFixedArrayElement(stack, 0, Hole, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(stack, 0, TheHole, SKIP_WRITE_BARRIER);
} else
deferred {
JoinStackPop(stack, receiver);
@@ -535,7 +535,7 @@ namespace array_join {
sepObj: Object, locales: Object, options: Object): Object {
// 3. If separator is undefined, let sep be the single-element String ",".
// 4. Else, let sep be ? ToString(separator).
- let sep: String =
+ const sep: String =
sepObj == Undefined ? ',' : ToString_Inline(context, sepObj);
// If the receiver is not empty and not already being joined, continue with
@@ -557,7 +557,8 @@ namespace array_join {
// https://tc39.github.io/ecma262/#sec-array.prototype.join
transitioning javascript builtin
- ArrayPrototypeJoin(context: Context, receiver: Object, ...arguments): Object {
+ ArrayPrototypeJoin(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
const separator: Object = arguments[0];
// 1. Let O be ? ToObject(this value).
@@ -566,8 +567,8 @@ namespace array_join {
// 2. Let len be ? ToLength(? Get(O, "length")).
const len: Number = GetLengthProperty(o);
- // Only handle valid array lengths. Although the spec allows larger values,
- // this matches historical V8 behavior.
+ // Only handle valid array lengths. Although the spec allows larger
+ // values, this matches historical V8 behavior.
if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength);
return CycleProtectedArrayJoin<JSArray>(
@@ -576,7 +577,7 @@ namespace array_join {
// https://tc39.github.io/ecma262/#sec-array.prototype.tolocalestring
transitioning javascript builtin ArrayPrototypeToLocaleString(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
const locales: Object = arguments[0];
const options: Object = arguments[1];
@@ -586,8 +587,8 @@ namespace array_join {
// 2. Let len be ? ToLength(? Get(O, "length")).
const len: Number = GetLengthProperty(o);
- // Only handle valid array lengths. Although the spec allows larger values,
- // this matches historical V8 behavior.
+ // Only handle valid array lengths. Although the spec allows larger
+ // values, this matches historical V8 behavior.
if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength);
return CycleProtectedArrayJoin<JSArray>(
@@ -596,7 +597,7 @@ namespace array_join {
// https://tc39.github.io/ecma262/#sec-array.prototype.tostring
transitioning javascript builtin ArrayPrototypeToString(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// 1. Let array be ? ToObject(this value).
const array: JSReceiver = ToObject_Inline(context, receiver);
@@ -617,7 +618,7 @@ namespace array_join {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.join
transitioning javascript builtin TypedArrayPrototypeJoin(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
const separator: Object = arguments[0];
// Spec: ValidateTypedArray is applied to the this value prior to evaluating
@@ -632,7 +633,7 @@ namespace array_join {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring
transitioning javascript builtin TypedArrayPrototypeToLocaleString(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
const locales: Object = arguments[0];
const options: Object = arguments[1];
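
[editorial note] The Hole to TheHole renames in this file all touch the join stack, which exists to break cycles: an array that is already being joined renders as the empty string instead of recursing forever. The behavior is observable from script; the safeJoin helper below is an illustrative stand-in with an explicit visited set, not the Torque algorithm:

    const a: unknown[] = [1, 2];
    a.push(a);                  // the array now contains itself
    console.log(a.join("-"));   // "1-2-": the nested occurrence joins as ""

    function safeJoin(arr: unknown[], sep: string, visited = new Set<unknown>()): string {
      if (visited.has(arr)) return "";   // already being joined: break the cycle
      visited.add(arr);
      const parts = arr.map(e =>
        e === null || e === undefined ? "" :
        Array.isArray(e) ? safeJoin(e, ",", visited) :  // nested arrays stringify with ","
        String(e));
      visited.delete(arr);
      return parts.join(sep);
    }
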
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq
index d6213157dc..5ebc451e43 100644
--- a/deps/v8/src/builtins/array-lastindexof.tq
+++ b/deps/v8/src/builtins/array-lastindexof.tq
@@ -12,7 +12,7 @@ namespace array_lastindexof {
labels IfHole {
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
const element: Object = elements.objects[index];
- if (element == Hole) goto IfHole;
+ if (element == TheHole) goto IfHole;
return element;
}
@@ -131,7 +131,7 @@ namespace array_lastindexof {
// https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf
transitioning javascript builtin ArrayPrototypeLastIndexOf(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// 1. Let O be ? ToObject(this value).
const object: JSReceiver = ToObject_Inline(context, receiver);
diff --git a/deps/v8/src/builtins/array-map.tq b/deps/v8/src/builtins/array-map.tq
index 7546f1cd00..dda569c682 100644
--- a/deps/v8/src/builtins/array-map.tq
+++ b/deps/v8/src/builtins/array-map.tq
@@ -4,9 +4,10 @@
namespace array_map {
transitioning javascript builtin
- ArrayMapLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, array: Object,
- initialK: Object, length: Object): Object {
+ ArrayMapLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, array: Object, initialK: Object,
+ length: Object): Object {
// All continuation points in the optimized filter implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -26,9 +27,10 @@ namespace array_map {
}
transitioning javascript builtin
- ArrayMapLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, array: Object,
- initialK: Object, length: Object, result: Object): Object {
+ ArrayMapLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, array: Object, initialK: Object,
+ length: Object, result: Object): Object {
// All continuation points in the optimized filter implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -55,7 +57,7 @@ namespace array_map {
}
transitioning builtin ArrayMapLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
array: JSReceiver, o: JSReceiver, initialK: Number,
length: Number): Object {
// 6. Let k be 0.
@@ -94,7 +96,7 @@ namespace array_map {
}
CreateJSArray(implicit context: Context)(validLength: Smi): JSArray {
- let length: Smi = this.fixedArray.length;
+ const length: Smi = this.fixedArray.length;
assert(validLength <= length);
let kind: ElementsKind = PACKED_SMI_ELEMENTS;
if (!this.onlySmis) {
@@ -114,7 +116,7 @@ namespace array_map {
kind = FastHoleyElementsKind(kind);
}
- let map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context));
+ const map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context));
let a: JSArray;
if (IsDoubleElementsKind(kind)) {
@@ -130,7 +132,7 @@ namespace array_map {
elements.floats[i] = Convert<float64>(n);
}
case (h: HeapObject): {
- assert(h == Hole);
+ assert(h == TheHole);
}
}
}
@@ -182,11 +184,11 @@ namespace array_map {
}
transitioning macro FastArrayMap(implicit context: Context)(
- fastO: FastJSArray, len: Smi, callbackfn: Callable,
+ fastO: FastJSArrayForRead, len: Smi, callbackfn: Callable,
thisArg: Object): JSArray
labels Bailout(JSArray, Smi) {
let k: Smi = 0;
- let fastOW = NewFastJSArrayWitness(fastO);
+ let fastOW = NewFastJSArrayForReadWitness(fastO);
let vector = NewVector(len);
// Build a fast loop over the smi array.
@@ -220,24 +222,12 @@ namespace array_map {
return vector.CreateJSArray(len);
}
- // Bails out if the slow path needs to be taken.
- // It's useful to structure it this way, because the consequences of
- // using the slow path on species creation are interesting to the caller.
- macro FastMapSpeciesCreate(implicit context: Context)(
- receiver: JSReceiver, length: Number): JSArray labels Bailout {
- if (IsArraySpeciesProtectorCellInvalid()) goto Bailout;
- const o = Cast<FastJSArray>(receiver) otherwise Bailout;
- const smiLength = Cast<Smi>(length) otherwise Bailout;
- const newMap: Map =
- LoadJSArrayElementsMap(PACKED_SMI_ELEMENTS, LoadNativeContext(context));
- return AllocateJSArray(PACKED_SMI_ELEMENTS, newMap, smiLength, smiLength);
- }
-
// https://tc39.github.io/ecma262/#sec-array.prototype.map
transitioning javascript builtin
- ArrayMap(implicit context: Context)(receiver: Object, ...arguments): Object {
+ ArrayMap(js-implicit context: Context, receiver: Object)(...arguments):
+ Object {
try {
- if (IsNullOrUndefined(receiver)) goto NullOrUndefinedError;
+ RequireObjectCoercible(receiver, 'Array.prototype.map');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -258,7 +248,7 @@ namespace array_map {
try {
// 5. Let A be ? ArraySpeciesCreate(O, len).
if (IsArraySpeciesProtectorCellInvalid()) goto SlowSpeciesCreate;
- const o: FastJSArray = Cast<FastJSArray>(receiver)
+ const o: FastJSArrayForRead = Cast<FastJSArrayForRead>(receiver)
otherwise SlowSpeciesCreate;
const smiLength: Smi = Cast<Smi>(len)
otherwise SlowSpeciesCreate;
@@ -279,8 +269,5 @@ namespace array_map {
label TypeError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.map');
- }
}
}
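
[editorial note] The fast path above is only taken for a plain fast JSArray receiver while the array-species protector cell is intact; a subclass or a custom @@species forces the generic ArraySpeciesCreate, which consults the receiver's constructor to allocate the result. Observable effect, sketched in TypeScript (MyArray is an illustrative name):

    class MyArray extends Array {}
    const m = MyArray.of(1, 2, 3);
    const doubled = m.map((x) => (x as number) * 2);
    console.log(doubled instanceof MyArray); // true: ArraySpeciesCreate used the subclass constructor
    console.log([1, 2, 3].map((x) => x * 2) instanceof Array); // true: plain arrays stay on the fast path
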
diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq
index 76123207fd..7293318625 100644
--- a/deps/v8/src/builtins/array-of.tq
+++ b/deps/v8/src/builtins/array-of.tq
@@ -5,7 +5,8 @@
namespace array_of {
// https://tc39.github.io/ecma262/#sec-array.of
transitioning javascript builtin
- ArrayOf(implicit context: Context)(receiver: Object, ...arguments): Object {
+ ArrayOf(js-implicit context: Context, receiver: Object)(...arguments):
+ Object {
// 1. Let len be the actual number of arguments passed to this function.
const len: Smi = Convert<Smi>(arguments.length);
@@ -35,7 +36,7 @@ namespace array_of {
// 7. Repeat, while k < len
while (k < len) {
// a. Let kValue be items[k].
- let kValue: Object = items[Convert<intptr>(k)];
+ const kValue: Object = items[Convert<intptr>(k)];
// b. Let Pk be ! ToString(k).
// c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue).
diff --git a/deps/v8/src/builtins/array-reduce-right.tq b/deps/v8/src/builtins/array-reduce-right.tq
index 33661c38d1..b1aa71b85b 100644
--- a/deps/v8/src/builtins/array-reduce-right.tq
+++ b/deps/v8/src/builtins/array-reduce-right.tq
@@ -4,8 +4,9 @@
namespace array {
transitioning javascript builtin
- ArrayReduceRightPreLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, length: Object): Object {
+ ArrayReduceRightPreLoopEagerDeoptContinuation(
+ js-implicit context: Context,
+ receiver: Object)(callback: Object, length: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -21,12 +22,13 @@ namespace array {
// the hole. The continuation stub will search for the initial non-hole
// element, rightly throwing an exception if not found.
return ArrayReduceRightLoopContinuation(
- jsreceiver, callbackfn, Hole, jsreceiver, 0, numberLength);
+ jsreceiver, callbackfn, TheHole, jsreceiver, 0, numberLength);
}
transitioning javascript builtin
- ArrayReduceRightLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, initialK: Object, length: Object,
+ ArrayReduceRightLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, initialK: Object, length: Object,
accumulator: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -45,27 +47,28 @@ namespace array {
}
transitioning javascript builtin
- ArrayReduceRightLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, initialK: Object, length: Object,
+ ArrayReduceRightLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, initialK: Object, length: Object,
result: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
const numberLength = Cast<Number>(length) otherwise unreachable;
    // The accumulator is the result from the callback call which just occurred.
- let r = ArrayReduceRightLoopContinuation(
+ const r = ArrayReduceRightLoopContinuation(
jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength);
return r;
}
transitioning builtin ArrayReduceRightLoopContinuation(implicit context:
Context)(
- receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
- o: JSReceiver, initialK: Number, length: Number): Object {
+ _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
+ o: JSReceiver, initialK: Number, _length: Number): Object {
let accumulator = initialAccumulator;
// 8b and 9. Repeat, while k >= 0
@@ -82,7 +85,7 @@ namespace array {
// 8b iii and 9c i. Let kValue be ? Get(O, Pk).
const value: Object = GetProperty(o, k);
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
// 8b iii 1.
accumulator = value;
} else {
@@ -99,7 +102,7 @@ namespace array {
// 8c. if kPresent is false, throw a TypeError exception.
// If the accumulator is discovered with the sentinel hole value,
// this means kPresent is false.
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight');
}
return accumulator;
@@ -111,9 +114,9 @@ namespace array {
labels Bailout(Number, Object) {
let accumulator = initialAccumulator;
const smiLen = Cast<Smi>(len) otherwise goto Bailout(len - 1, accumulator);
- let fastO =
- Cast<FastJSArray>(o) otherwise goto Bailout(len - 1, accumulator);
- let fastOW = NewFastJSArrayWitness(fastO);
+ const fastO = Cast<FastJSArrayForRead>(o)
+ otherwise goto Bailout(len - 1, accumulator);
+ let fastOW = NewFastJSArrayForReadWitness(fastO);
// Build a fast loop over the array.
for (let k: Smi = smiLen - 1; k >= 0; k--) {
@@ -123,7 +126,7 @@ namespace array {
if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
accumulator = value;
} else {
accumulator = Call(
@@ -131,7 +134,7 @@ namespace array {
fastOW.Get());
}
}
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight');
}
return accumulator;
@@ -139,12 +142,10 @@ namespace array {
// https://tc39.github.io/ecma262/#sec-array.prototype.reduceRight
transitioning javascript builtin
- ArrayReduceRight(implicit context: Context)(receiver: Object, ...arguments):
- Object {
+ ArrayReduceRight(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.reduceRight');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -162,7 +163,8 @@ namespace array {
// exception. (This case is handled at the end of
// ArrayReduceRightLoopContinuation).
- const initialValue: Object = arguments.length > 1 ? arguments[1] : Hole;
+ const initialValue: Object =
+ arguments.length > 1 ? arguments[1] : TheHole;
try {
return FastArrayReduceRight(o, len, callbackfn, initialValue)
@@ -176,8 +178,5 @@ namespace array {
label NoCallableError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.reduceRight');
- }
}
}
diff --git a/deps/v8/src/builtins/array-reduce.tq b/deps/v8/src/builtins/array-reduce.tq
index 67a112fd41..a5f6feb9cc 100644
--- a/deps/v8/src/builtins/array-reduce.tq
+++ b/deps/v8/src/builtins/array-reduce.tq
@@ -4,8 +4,9 @@
namespace array {
transitioning javascript builtin
- ArrayReducePreLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, length: Object): Object {
+ ArrayReducePreLoopEagerDeoptContinuation(
+ js-implicit context: Context,
+ receiver: Object)(callback: Object, length: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -21,12 +22,13 @@ namespace array {
// the hole. The continuation stub will search for the initial non-hole
// element, rightly throwing an exception if not found.
return ArrayReduceLoopContinuation(
- jsreceiver, callbackfn, Hole, jsreceiver, 0, numberLength);
+ jsreceiver, callbackfn, TheHole, jsreceiver, 0, numberLength);
}
transitioning javascript builtin
- ArrayReduceLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, initialK: Object, length: Object,
+ ArrayReduceLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, initialK: Object, length: Object,
accumulator: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -45,25 +47,26 @@ namespace array {
}
transitioning javascript builtin
- ArrayReduceLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, initialK: Object, length: Object,
+ ArrayReduceLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, initialK: Object, length: Object,
result: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
const numberLength = Cast<Number>(length) otherwise unreachable;
    // The accumulator is the result from the callback call which just occurred.
- let r = ArrayReduceLoopContinuation(
+ const r = ArrayReduceLoopContinuation(
jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength);
return r;
}
transitioning builtin ArrayReduceLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
+ _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
o: JSReceiver, initialK: Number, length: Number): Object {
let accumulator = initialAccumulator;
@@ -81,7 +84,7 @@ namespace array {
// 6c. i. Let kValue be ? Get(O, Pk).
const value: Object = GetProperty(o, k);
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
// 8b.
accumulator = value;
} else {
@@ -98,7 +101,7 @@ namespace array {
// 8c. if kPresent is false, throw a TypeError exception.
// If the accumulator is discovered with the sentinel hole value,
// this means kPresent is false.
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
}
return accumulator;
@@ -110,9 +113,10 @@ namespace array {
labels Bailout(Number, Object) {
const k = 0;
let accumulator = initialAccumulator;
- const smiLen = Cast<Smi>(len) otherwise goto Bailout(k, accumulator);
- let fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k, accumulator);
- let fastOW = NewFastJSArrayWitness(fastO);
+ Cast<Smi>(len) otherwise goto Bailout(k, accumulator);
+ const fastO =
+ Cast<FastJSArrayForRead>(o) otherwise goto Bailout(k, accumulator);
+ let fastOW = NewFastJSArrayForReadWitness(fastO);
// Build a fast loop over the array.
for (let k: Smi = 0; k < len; k++) {
@@ -122,7 +126,7 @@ namespace array {
if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
accumulator = value;
} else {
accumulator = Call(
@@ -130,7 +134,7 @@ namespace array {
fastOW.Get());
}
}
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
}
return accumulator;
@@ -138,12 +142,10 @@ namespace array {
// https://tc39.github.io/ecma262/#sec-array.prototype.reduce
transitioning javascript builtin
- ArrayReduce(implicit context: Context)(receiver: Object, ...arguments):
+ ArrayReduce(js-implicit context: Context, receiver: Object)(...arguments):
Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.reduce');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -161,7 +163,8 @@ namespace array {
// exception. (This case is handled at the end of
// ArrayReduceLoopContinuation).
- const initialValue: Object = arguments.length > 1 ? arguments[1] : Hole;
+ const initialValue: Object =
+ arguments.length > 1 ? arguments[1] : TheHole;
try {
return FastArrayReduce(o, len, callbackfn, initialValue)
@@ -175,8 +178,5 @@ namespace array {
label NoCallableError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.reduce');
- }
}
}
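
[editorial note] TheHole threaded through the reduce continuations above is a sentinel meaning "no accumulator yet"; if the loop ends without ever replacing it, the spec mandates a TypeError (kReduceNoInitial). In observable JS terms, for both reduce and reduceRight:

    console.log([1, 2, 3].reduce((a, b) => a + b));            // 6: the first element seeds the accumulator
    console.log(([] as number[]).reduce((a, b) => a + b, 0));  // 0: explicit initial value, empty array is fine
    try {
      ([] as number[]).reduce((a, b) => a + b);                // empty array and no initial value
    } catch (e) {
      console.log((e as Error).name);                          // "TypeError" (kReduceNoInitial)
    }
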
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index f1ba8fddf7..82d2e6b605 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -165,7 +165,7 @@ namespace array_reverse {
// https://tc39.github.io/ecma262/#sec-array.prototype.reverse
transitioning javascript builtin ArrayPrototypeReverse(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
try {
TryFastPackedArrayReverse(receiver) otherwise Baseline;
return receiver;
diff --git a/deps/v8/src/builtins/array-shift.tq b/deps/v8/src/builtins/array-shift.tq
index 3c8c1491bb..4dd82d7b88 100644
--- a/deps/v8/src/builtins/array-shift.tq
+++ b/deps/v8/src/builtins/array-shift.tq
@@ -103,7 +103,7 @@ namespace array_shift {
// https://tc39.github.io/ecma262/#sec-array.prototype.shift
transitioning javascript builtin ArrayPrototypeShift(
- implicit context: Context)(receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
try {
return TryFastArrayShift(receiver, arguments) otherwise Slow;
}
diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq
index 5162329408..c3a6ac75cb 100644
--- a/deps/v8/src/builtins/array-slice.tq
+++ b/deps/v8/src/builtins/array-slice.tq
@@ -63,7 +63,7 @@ namespace array_slice {
for (let current: Smi = start; current < to; ++current) {
const e: Object =
sloppyElements.objects[current + kSloppyArgumentsParameterMapStart];
- const newElement: Object = e != Hole ?
+ const newElement: Object = e != TheHole ?
argumentsContext[UnsafeCast<Smi>(e)] :
unmappedElements.objects[current];
// It is safe to skip the write barrier here because resultElements was
@@ -105,7 +105,6 @@ namespace array_slice {
return ExtractFastJSArray(context, a, start, count);
}
case (a: JSArgumentsObjectWithLength): {
- const nativeContext: NativeContext = LoadNativeContext(context);
const map: Map = a.map;
if (IsFastAliasedArgumentsMap(map)) {
return HandleFastAliasedSloppyArgumentsSlice(context, a, start, count)
@@ -123,8 +122,8 @@ namespace array_slice {
// https://tc39.github.io/ecma262/#sec-array.prototype.slice
transitioning javascript builtin
- ArrayPrototypeSlice(context: Context, receiver: Object, ...arguments):
- Object {
+ ArrayPrototypeSlice(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// Handle array cloning case if the receiver is a fast array.
if (arguments.length == 0) {
typeswitch (receiver) {
@@ -186,7 +185,7 @@ namespace array_slice {
// 10. Repeat, while k < final
while (k < final) {
// a. Let Pk be ! ToString(k).
- let pK: Number = k;
+ const pK: Number = k;
// b. Let kPresent be ? HasProperty(O, Pk).
const fromPresent: Boolean = HasProperty(o, pK);
diff --git a/deps/v8/src/builtins/array-some.tq b/deps/v8/src/builtins/array-some.tq
index f68ea4ac30..a30af4e47a 100644
--- a/deps/v8/src/builtins/array-some.tq
+++ b/deps/v8/src/builtins/array-some.tq
@@ -4,8 +4,9 @@
namespace array {
transitioning javascript builtin
- ArraySomeLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ ArraySomeLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object,
length: Object): Object {
// All continuation points in the optimized some implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -25,9 +26,10 @@ namespace array {
}
transitioning javascript builtin
- ArraySomeLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, result: Object): Object {
+ ArraySomeLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object, length: Object,
+ result: Object): Object {
// All continuation points in the optimized some implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -51,9 +53,9 @@ namespace array {
}
transitioning builtin ArraySomeLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
- array: Object, o: JSReceiver, initialK: Number, length: Number,
- initialTo: Object): Object {
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _array: Object, o: JSReceiver, initialK: Number, length: Number,
+ _initialTo: Object): Object {
// 5. Let k be 0.
// 6. Repeat, while k < len
for (let k: Number = initialK; k < length; k++) {
@@ -88,7 +90,7 @@ namespace array {
labels Bailout(Smi) {
let k: Smi = 0;
const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- let fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
let fastOW = NewFastJSArrayWitness(fastO);
// Build a fast loop over the smi array.
@@ -109,11 +111,10 @@ namespace array {
// https://tc39.github.io/ecma262/#sec-array.prototype.some
transitioning javascript builtin
- ArraySome(implicit context: Context)(receiver: Object, ...arguments): Object {
+ ArraySome(js-implicit context: Context, receiver: Object)(...arguments):
+ Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.some');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -143,8 +144,5 @@ namespace array {
label TypeError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.some');
- }
}
}
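
[editorial note] As with forEach, the receiver check above becomes RequireObjectCoercible; the fast loop itself returns as soon as the callback produces a truthy value, which is the short-circuit visible from script:

    let calls = 0;
    const hasEven = [1, 3, 8, 5, 7].some((x) => { calls++; return x % 2 === 0; });
    console.log(hasEven, calls); // true 3 (5 and 7 are never visited)
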
diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq
index e24b51760c..3b65bb03d4 100644
--- a/deps/v8/src/builtins/array-splice.tq
+++ b/deps/v8/src/builtins/array-splice.tq
@@ -54,8 +54,7 @@ namespace array_splice {
macro FastSplice<FixedArrayType: type, ElementType: type>(implicit context:
Context)(
args: Arguments, a: JSArray, length: Smi, newLength: Smi,
- lengthDelta: Smi, actualStart: Smi, insertCount: Smi,
- actualDeleteCount: Smi): void labels Bailout {
+ actualStart: Smi, insertCount: Smi, actualDeleteCount: Smi): void {
// Make sure elements are writable.
array::EnsureWriteableFastElements(a);
@@ -77,7 +76,7 @@ namespace array_splice {
UnsafeCast<FixedArrayType>(elements), dstIndex, srcIndex, count);
} else {
// Grow.
- let capacity: Smi = CalculateNewElementsCapacity(newLength);
+ const capacity: Smi = CalculateNewElementsCapacity(newLength);
const newElements: FixedArrayType =
Extract<FixedArrayType>(elements, 0, actualStart, capacity);
a.elements = newElements;
@@ -168,12 +167,12 @@ namespace array_splice {
if (IsFastSmiOrTaggedElementsKind(elementsKind)) {
FastSplice<FixedArray, Object>(
- args, a, length, newLength, lengthDelta, actualStart, insertCount,
- actualDeleteCount) otherwise Bailout;
+ args, a, length, newLength, actualStart, insertCount,
+ actualDeleteCount);
} else {
FastSplice<FixedDoubleArray, Number>(
- args, a, length, newLength, lengthDelta, actualStart, insertCount,
- actualDeleteCount) otherwise Bailout;
+ args, a, length, newLength, actualStart, insertCount,
+ actualDeleteCount);
}
return deletedResult;
@@ -301,8 +300,6 @@ namespace array_splice {
context: Context, arguments: Arguments, o: JSReceiver, len: Number,
actualStart: Number, insertCount: Smi,
actualDeleteCount: Number): Object {
- const affected: Number = len - actualStart - actualDeleteCount;
-
// 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount).
const a: JSReceiver = ArraySpeciesCreate(context, o, actualDeleteCount);
const itemCount: Number = insertCount;
@@ -353,8 +350,8 @@ namespace array_splice {
// https://tc39.github.io/ecma262/#sec-array.prototype.splice
transitioning javascript builtin
- ArrayPrototypeSplice(context: Context, receiver: Object, ...arguments):
- Object {
+ ArrayPrototypeSplice(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject(context, receiver);
diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq
index b2e746db47..e685d520cd 100644
--- a/deps/v8/src/builtins/array-unshift.tq
+++ b/deps/v8/src/builtins/array-unshift.tq
@@ -93,7 +93,7 @@ namespace array_unshift {
// https://tc39.github.io/ecma262/#sec-array.prototype.unshift
transitioning javascript builtin ArrayPrototypeUnshift(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
try {
TryFastArrayUnshift(context, receiver, arguments) otherwise Baseline;
}
diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq
index 9807db19c6..7e044e086b 100644
--- a/deps/v8/src/builtins/array.tq
+++ b/deps/v8/src/builtins/array.tq
@@ -33,18 +33,19 @@ namespace array {
}
macro IsJSArray(implicit context: Context)(o: Object): bool {
- try {
- const array: JSArray = Cast<JSArray>(o) otherwise NotArray;
- return true;
- }
- label NotArray {
- return false;
+ typeswitch (o) {
+ case (JSArray): {
+ return true;
+ }
+ case (Object): {
+ return false;
+ }
}
}
macro LoadElementOrUndefined(a: FixedArray, i: Smi): Object {
const e: Object = a.objects[i];
- return e == Hole ? Undefined : e;
+ return e == TheHole ? Undefined : e;
}
macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined {
@@ -62,26 +63,7 @@ namespace array {
}
macro StoreArrayHole(elements: FixedArray, k: Smi): void {
- elements.objects[k] = Hole;
- }
-
- macro CopyArrayElement(
- elements: FixedArray, newElements: FixedArray, from: Smi, to: Smi): void {
- const e: Object = elements.objects[from];
- newElements.objects[to] = e;
- }
-
- macro CopyArrayElement(
- elements: FixedDoubleArray, newElements: FixedDoubleArray, from: Smi,
- to: Smi): void {
- try {
- const floatValue: float64 = LoadDoubleWithHoleCheck(elements, from)
- otherwise FoundHole;
- newElements.floats[to] = floatValue;
- }
- label FoundHole {
- StoreArrayHole(newElements, to);
- }
+ elements.objects[k] = TheHole;
}
extern macro SetPropertyLength(implicit context: Context)(Object, Number);
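
[editorial note] LoadElementOrUndefined above converts the internal hole marker (now spelled TheHole) to the JS-visible undefined. From script a hole is never observable as a distinct value; it shows up only as a missing property:

    const sparse = [1, , 3];          // index 1 is a hole, not the value undefined
    console.log(1 in sparse);         // false: the property does not exist
    console.log(sparse[1]);           // undefined: the same conversion LoadElementOrUndefined performs
    console.log(Object.keys(sparse)); // [ '0', '2' ]
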
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 76e1a486c8..4aa1d57837 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -14,9 +14,11 @@
#include 'src/objects/js-generator.h'
#include 'src/objects/js-promise.h'
#include 'src/objects/js-regexp-string-iterator.h'
-#include 'src/objects/module.h'
+#include 'src/objects/js-weak-refs.h'
#include 'src/objects/objects.h'
+#include 'src/objects/source-text-module.h'
#include 'src/objects/stack-frame-info.h'
+#include 'src/objects/synthetic-module.h'
#include 'src/objects/template-objects.h'
type void;
@@ -31,12 +33,16 @@ type PositiveSmi extends Smi;
// The Smi value zero, which is often used as null for HeapObject types.
type Zero extends PositiveSmi;
+// A value with the size of Tagged which may contain arbitrary data.
+type Uninitialized extends Tagged;
+
@abstract
extern class HeapObject extends Tagged {
map: Map;
}
type Object = Smi | HeapObject;
+
type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
type int31 extends int32
@@ -84,32 +90,33 @@ extern class Oddball extends HeapObject {
extern class HeapNumber extends HeapObject { value: float64; }
type Number = Smi | HeapNumber;
-type BigInt extends HeapObject generates 'TNode<BigInt>';
type Numeric = Number | BigInt;
@abstract
-@noVerifier
+@generateCppClass
extern class Name extends HeapObject {
- hash_field: int32;
+ hash_field: uint32;
}
+@generateCppClass
extern class Symbol extends Name {
flags: int32;
- name: Object;
+ name: Object; // The print name of a symbol, or undefined if none.
}
@abstract
+@generateCppClass
extern class String extends Name {
- length: uint32;
+ length: int32;
}
+@generateCppClass
extern class ConsString extends String {
first: String;
second: String;
}
@abstract
-@noVerifier
extern class ExternalString extends String {
resource: RawPtr;
resource_data: RawPtr;
@@ -118,28 +125,37 @@ extern class ExternalString extends String {
extern class ExternalOneByteString extends ExternalString {}
extern class ExternalTwoByteString extends ExternalString {}
-extern class InternalizedString extends String {}
+@generateCppClass
+extern class InternalizedString extends String {
+}
// TODO(v8:8983): Add declaration for variable-sized region.
@abstract
-@noVerifier
+@generateCppClass
extern class SeqString extends String {
}
-extern class SeqOneByteString extends SeqString {}
-extern class SeqTwoByteString extends SeqString {}
+@generateCppClass
+extern class SeqOneByteString extends SeqString {
+}
+@generateCppClass
+extern class SeqTwoByteString extends SeqString {
+}
+@generateCppClass
extern class SlicedString extends String {
parent: String;
offset: Smi;
}
-extern class ThinString extends String { actual: String; }
+@generateCppClass
+extern class ThinString extends String {
+ actual: String;
+}
// The HeapNumber value NaN
type NaN extends HeapNumber;
@abstract
-@noVerifier
@generatePrint
@generateCppClass
extern class Struct extends HeapObject {
@@ -169,7 +185,6 @@ type DirectString extends String;
type RootIndex generates 'TNode<Int32T>' constexpr 'RootIndex';
@abstract
-@noVerifier
@generateCppClass
extern class FixedArrayBase extends HeapObject {
length: Smi;
@@ -190,9 +205,7 @@ type LayoutDescriptor extends ByteArray
type TransitionArray extends WeakFixedArray
generates 'TNode<TransitionArray>';
-// InstanceType actually extends uint16, but a bunch of methods in
-// CodeStubAssembler expect a TNode<Int32T>, so keeping it signed for now.
-type InstanceType extends int16 constexpr 'InstanceType';
+type InstanceType extends uint16 constexpr 'InstanceType';
extern class Map extends HeapObject {
instance_size_in_words: uint8;
@@ -214,19 +227,21 @@ extern class Map extends HeapObject {
@ifnot(V8_DOUBLE_FIELDS_UNBOXING) layout_descriptor: void;
dependent_code: DependentCode;
prototype_validity_cell: Smi | Cell;
+ // TODO(v8:9108): Misusing "weak" keyword; type should be
+ // Map | Weak<Map> | TransitionArray | PrototypeInfo | Smi.
weak transitions_or_prototype_info: Map | TransitionArray |
PrototypeInfo | Smi;
}
-type BytecodeArray extends FixedArrayBase;
-
@generatePrint
+@generateCppClass
extern class EnumCache extends Struct {
keys: FixedArray;
indices: FixedArray;
}
@generatePrint
+@generateCppClass
extern class SourcePositionTableWithFrameCache extends Struct {
source_position_table: ByteArray;
stack_frame_cache: Object;
@@ -250,8 +265,7 @@ extern class DescriptorArray extends HeapObject {
// than building the definition from C++.
intrinsic %GetAllocationBaseSize<Class: type>(map: Map): intptr;
intrinsic %Allocate<Class: type>(size: intptr): Class;
-intrinsic %AllocateInternalClass<Class: type>(slotCount: constexpr intptr):
- Class;
+intrinsic %GetStructMap(instanceKind: constexpr InstanceType): Map;
intrinsic %AddIndexedFieldSizeToObjectSize<T: type>(
baseSize: intptr, indexSize: T, fieldSize: int32): intptr {
@@ -282,24 +296,35 @@ intrinsic
}
@abstract
-@noVerifier
extern class JSReceiver extends HeapObject {
- properties_or_hash: FixedArrayBase | Smi;
+ properties_or_hash: FixedArrayBase | PropertyArray | Smi;
}
type Constructor extends JSReceiver;
@abstract
@dirtyInstantiatedAbstractClass
+@generateCppClass
extern class JSObject extends JSReceiver {
- @noVerifier elements: FixedArrayBase;
+ // [elements]: The elements (properties with names that are integers).
+ //
+ // Elements can be in two general modes: fast and slow. Each mode
+ // corresponds to a set of object representations of elements that
+ // have something in common.
+ //
+ // In the fast mode elements is a FixedArray and so each element can be
+ // quickly accessed. The elements array can have one of several maps in this
+ // mode: fixed_array_map, fixed_double_array_map,
+ // sloppy_arguments_elements_map or fixed_cow_array_map (for copy-on-write
+ // arrays). In the latter case the elements array may be shared by a few
+ // objects and so before writing to any element the array must be copied. Use
+ // EnsureWritableFastElements in this case.
+ //
+ // In the slow mode the elements is either a NumberDictionary or a
+ // FixedArray parameter map for a (sloppy) arguments object.
+ elements: FixedArrayBase;
}
-macro NewJSObject(
- map: Map, properties: FixedArrayBase | Smi,
- elements: FixedArrayBase): JSObject {
- return new JSObject{map, properties_or_hash: properties, elements};
-}
macro NewJSObject(implicit context: Context)(): JSObject {
const objectFunction: JSFunction = GetObjectFunction();
const map: Map = Cast<Map>(objectFunction.prototype_or_initial_map)
@@ -328,19 +353,33 @@ macro GetDerivedMap(implicit context: Context)(
}
}
+macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map):
+ JSObject {
+ let properties = kEmptyFixedArray;
+ if (IsDictionaryMap(map)) {
+ properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
+ }
+ return AllocateJSObjectFromMap(
+ map, properties, kEmptyFixedArray, kNone, kWithSlackTracking);
+}
+
extern class JSFunction extends JSObject {
shared_function_info: SharedFunctionInfo;
context: Context;
feedback_cell: FeedbackCell;
weak code: Code;
+
+ // Space for the following field may or may not be allocated.
@noVerifier weak prototype_or_initial_map: JSReceiver | Map;
}
+@generateCppClass
extern class JSProxy extends JSReceiver {
- target: Object;
- handler: Object;
+ target: JSReceiver | Null;
+ handler: JSReceiver | Null;
}
+// Just a starting shape for JSObject; properties can move after initialization.
@noVerifier
extern class JSProxyRevocableResult extends JSObject {
proxy: Object;
@@ -358,21 +397,39 @@ macro NewJSProxyRevocableResult(implicit context: Context)(
};
}
-extern class JSGlobalProxy extends JSObject { native_context: Object; }
+@generateCppClass
+extern class JSGlobalProxy extends JSObject {
+ // [native_context]: the owner native context of this global proxy object.
+ // It is null value if this object is not used by any context.
+ native_context: Object;
+}
-extern class JSValue extends JSObject { value: Object; }
+@generateCppClass
+extern class JSPrimitiveWrapper extends JSObject {
+ value: Object;
+}
extern class JSArgumentsObject extends JSObject {}
+
+// Just a starting shape for JSObject; properties can move after initialization.
@noVerifier
@hasSameInstanceTypeAsParent
extern class JSArgumentsObjectWithLength extends JSArgumentsObject {
length: Object;
}
+
+// Just a starting shape for JSObject; properties can move after initialization.
@hasSameInstanceTypeAsParent
extern class JSSloppyArgumentsObject extends JSArgumentsObjectWithLength {
callee: Object;
}
+// Just a starting shape for JSObject; properties can move after initialization.
+@hasSameInstanceTypeAsParent
+@noVerifier
+extern class JSStrictArgumentsObject extends JSArgumentsObjectWithLength {
+}
+
extern class JSArrayIterator extends JSObject {
iterated_object: JSReceiver;
next_index: Number;
@@ -405,20 +462,6 @@ macro NewJSArray(implicit context: Context)(): JSArray {
};
}
-struct HoleIterator {
- Next(): Object labels NoMore() {
- return Hole;
- }
-}
-
-macro NewJSArray(implicit context: Context)(map: Map, length: Smi): JSArray {
- const map = GetFastPackedSmiElementsJSArrayMap();
- const i = HoleIterator{};
- const elements = new FixedArray{map, length, objects: ...i};
- return new
- JSArray{map, properties_or_hash: kEmptyFixedArray, elements, length};
-}
-
// A HeapObject with a JSArray map, and either fast packed elements, or fast
// holey elements when the global NoElementsProtector is not invalidated.
transient type FastJSArray extends JSArray;
@@ -441,18 +484,61 @@ transient type FastJSArrayForReadWithNoCustomIteration extends
type NoSharedNameSentinel extends Smi;
-type JSModuleNamespace extends JSObject;
-type WeakArrayList extends HeapObject;
+@generateCppClass
+extern class CallHandlerInfo extends Struct {
+ callback: Foreign | Undefined;
+ js_callback: Foreign | Undefined;
+ data: Object;
+}
+
+type ObjectHashTable extends FixedArray;
@abstract
+extern class Module extends HeapObject {
+ exports: ObjectHashTable;
+ hash: Smi;
+ status: Smi;
+ module_namespace: JSModuleNamespace | Undefined;
+ exception: Object;
+}
+
+type SourceTextModuleInfo extends FixedArray;
+
+extern class SourceTextModule extends Module {
+ code: SharedFunctionInfo | JSFunction |
+ JSGeneratorObject | SourceTextModuleInfo;
+ regular_exports: FixedArray;
+ regular_imports: FixedArray;
+ requested_modules: FixedArray;
+ script: Script;
+ import_meta: TheHole | JSObject;
+ dfs_index: Smi;
+ dfs_ancestor_index: Smi;
+}
+
+extern class SyntheticModule extends Module {
+ name: String;
+ export_names: FixedArray;
+ evaluation_steps: Foreign;
+}
+
+@abstract
+extern class JSModuleNamespace extends JSObject {
+ module: Module;
+}
+
+@hasSameInstanceTypeAsParent
@noVerifier
+extern class TemplateList extends FixedArray {
+}
+
+@abstract
extern class JSWeakCollection extends JSObject {
table: Object;
}
extern class JSWeakSet extends JSWeakCollection {}
extern class JSWeakMap extends JSWeakCollection {}
-@noVerifier
extern class JSCollectionIterator extends JSObject {
table: Object;
index: Object;
@@ -474,12 +560,20 @@ extern class JSMessageObject extends JSObject {
error_level: Smi;
}
+extern class WeakArrayList extends HeapObject {
+ capacity: Smi;
+ length: Smi;
+ // TODO(v8:8983): declare variable-sized region for contained MaybeObject's
+ // objects[length]: MaybeObject;
+}
+
extern class PrototypeInfo extends Struct {
js_module_namespace: JSModuleNamespace | Undefined;
prototype_users: WeakArrayList | Zero;
registry_slot: Smi;
validity_cell: Object;
- @noVerifier object_create_map: Smi | WeakArrayList;
+ // TODO(v8:9108): Should be Weak<Map> | Undefined.
+ @noVerifier object_create_map: Map | Undefined;
bit_field: Smi;
}
@@ -503,7 +597,7 @@ extern class Script extends Struct {
extern class EmbedderDataArray extends HeapObject { length: Smi; }
-type ScopeInfo extends Object generates 'TNode<ScopeInfo>';
+type ScopeInfo extends HeapObject generates 'TNode<ScopeInfo>';
extern class PreparseData extends HeapObject {
// TODO(v8:8983): Add declaration for variable-sized region.
@@ -527,16 +621,30 @@ extern class SharedFunctionInfo extends HeapObject {
expected_nof_properties: uint16;
function_token_offset: int16;
flags: int32;
+ function_literal_id: int32;
@if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32;
}
extern class JSBoundFunction extends JSObject {
- bound_target_function: JSReceiver;
+ bound_target_function: Callable;
bound_this: Object;
bound_arguments: FixedArray;
}
-type Callable = JSFunction | JSBoundFunction | JSProxy;
+// Specialized types. The following three type definitions don't correspond to
+// actual C++ classes, but have Is... methods that check additional constraints.
+
+// A Foreign object whose raw pointer is not allowed to be null.
+type NonNullForeign extends Foreign;
+
+// A function built with InstantiateFunction for the public API.
+type CallableApiObject extends HeapObject;
+
+// A JSProxy with the callable bit set.
+type CallableJSProxy extends JSProxy;
+
+type Callable =
+ JSFunction | JSBoundFunction | CallableJSProxy | CallableApiObject;
extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
FixedArrayBase): intptr;
@@ -547,7 +655,7 @@ type NumberDictionary extends HeapObject
extern class FreeSpace extends HeapObject {
size: Smi;
- @noVerifier next: FreeSpace;
+ next: FreeSpace | Uninitialized;
}
// %RawDownCast should *never* be used anywhere in Torque code except for
@@ -609,45 +717,12 @@ extern class JSArrayBufferView extends JSObject {
}
extern class JSTypedArray extends JSArrayBufferView {
- AttachOffHeapBuffer(buffer: JSArrayBuffer, byteOffset: uintptr): void {
- const basePointer: Smi = 0;
-
- // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit
- // platforms are self-limiting, because we can't allocate an array bigger
- // than our 32-bit arithmetic range anyway. 64 bit platforms could
- // theoretically have an offset up to 2^35 - 1.
- const backingStore = buffer.backing_store;
- const externalPointer = backingStore + Convert<intptr>(byteOffset);
-
- // Assert no overflow has occurred. Only assert if the mock array buffer
- // allocator is NOT used. When the mock array buffer is used, impossibly
- // large allocations are allowed that would erroneously cause an overflow
- // and this assertion to fail.
- assert(
- IsMockArrayBufferAllocatorFlag() ||
- Convert<uintptr>(externalPointer) >= Convert<uintptr>(backingStore));
-
- this.elements = kEmptyByteArray;
- this.buffer = buffer;
- this.external_pointer = externalPointer;
- this.base_pointer = basePointer;
- }
-
length: uintptr;
external_pointer: RawPtr;
base_pointer: ByteArray | Smi;
}
-@noVerifier
-extern class JSAccessorPropertyDescriptor extends JSObject {
- get: Object;
- set: Object;
- enumerable: Object;
- configurable: Object;
-}
-
@abstract
-@noVerifier
extern class JSCollection extends JSObject {
table: Object;
}
@@ -681,14 +756,6 @@ extern class JSStringIterator extends JSObject {
next_index: Smi;
}
-@noVerifier
-extern class JSDataPropertyDescriptor extends JSObject {
- value: Object;
- writable: Object;
- enumerable: Object;
- configurable: Object;
-}
-
@abstract
extern class TemplateInfo extends Struct {
tag: Object;
@@ -722,7 +789,7 @@ extern class FunctionTemplateInfo extends TemplateInfo {
function_template_rare_data: Object;
shared_function_info: Object;
flag: Smi;
- @noVerifier length: Smi;
+ length: Smi;
cached_property_name: Object;
}
@@ -749,8 +816,6 @@ type LanguageMode extends Smi constexpr 'LanguageMode';
type ExtractFixedArrayFlags
generates 'TNode<Smi>'
constexpr 'CodeStubAssembler::ExtractFixedArrayFlags';
-type ParameterMode
- generates 'TNode<Int32T>' constexpr 'ParameterMode';
type WriteBarrierMode
generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
@@ -770,21 +835,21 @@ const UTF32:
extern class Foreign extends HeapObject { foreign_address: RawPtr; }
extern class InterceptorInfo extends Struct {
- @noVerifier getter: Foreign | Zero;
- @noVerifier setter: Foreign | Zero;
- @noVerifier query: Foreign | Zero;
- @noVerifier descriptor: Foreign | Zero;
- @noVerifier deleter: Foreign | Zero;
- @noVerifier enumerator: Foreign | Zero;
- @noVerifier definer: Foreign | Zero;
+ getter: NonNullForeign | Zero | Undefined;
+ setter: NonNullForeign | Zero | Undefined;
+ query: NonNullForeign | Zero | Undefined;
+ descriptor: NonNullForeign | Zero | Undefined;
+ deleter: NonNullForeign | Zero | Undefined;
+ enumerator: NonNullForeign | Zero | Undefined;
+ definer: NonNullForeign | Zero | Undefined;
data: Object;
flags: Smi;
}
extern class AccessCheckInfo extends Struct {
- callback: Foreign | Zero;
- named_interceptor: InterceptorInfo | Zero;
- indexed_interceptor: InterceptorInfo | Zero;
+ callback: Foreign | Zero | Undefined;
+ named_interceptor: InterceptorInfo | Zero | Undefined;
+ indexed_interceptor: InterceptorInfo | Zero | Undefined;
data: Object;
}
@@ -800,6 +865,9 @@ extern class Cell extends HeapObject { value: Object; }
extern class DataHandler extends Struct {
smi_handler: Smi | Code;
validity_cell: Smi | Cell;
+
+ // Space for the following fields may or may not be allocated.
+ // TODO(v8:9108): Misusing "weak" keyword; should be MaybeObject.
@noVerifier weak data_1: Object;
@noVerifier weak data_2: Object;
@noVerifier weak data_3: Object;
@@ -850,17 +918,22 @@ extern class StackFrameInfo extends Struct {
column_number: Smi;
promise_all_index: Smi;
script_id: Smi;
- script_name: Object;
- script_name_or_source_url: Object;
- function_name: Object;
- wasm_module_name: Object;
+ script_name: String | Null | Undefined;
+ script_name_or_source_url: String | Null | Undefined;
+ function_name: String | Null | Undefined;
+ method_name: String | Null | Undefined;
+ type_name: String | Null | Undefined;
+ eval_origin: String | Null | Undefined;
+ wasm_module_name: String | Null | Undefined;
flag: Smi;
}
+type FrameArray extends FixedArray;
+
extern class StackTraceFrame extends Struct {
- frame_array: Object;
+ frame_array: FrameArray | Undefined;
frame_index: Smi;
- frame_info: Object;
+ frame_info: StackFrameInfo | Undefined;
id: Smi;
}
@@ -876,9 +949,20 @@ extern class WasmExportedFunctionData extends Struct {
instance: WasmInstanceObject;
jump_table_offset: Smi;
function_index: Smi;
+ // The remaining fields are for fast calling from C++. The contract is
+ // that they are lazily populated, and either all will be present or none.
+ c_wrapper_code: Object;
+ wasm_call_target: Smi; // Pseudo-smi: one-bit shift on all platforms.
+ packed_args_size: Smi;
}
-extern class WasmJSFunctionData extends Struct { wrapper_code: Code; }
+extern class WasmJSFunctionData extends Struct {
+ callable: JSReceiver;
+ wrapper_code: Code;
+ serialized_return_count: Smi;
+ serialized_parameter_count: Smi;
+ serialized_signature: ByteArray; // PodArray<wasm::ValueType>
+}
extern class WasmCapiFunctionData extends Struct {
call_target: RawPtr;
@@ -887,6 +971,16 @@ extern class WasmCapiFunctionData extends Struct {
serialized_signature: ByteArray; // PodArray<wasm::ValueType>
}
+extern class WasmIndirectFunctionTable extends Struct {
+ size: uint32;
+ @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
+ @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
+ sig_ids: RawPtr;
+ targets: RawPtr;
+ managed_native_allocations: Foreign | Undefined;
+ refs: FixedArray;
+}
+
extern class WasmDebugInfo extends Struct {
instance: WasmInstanceObject;
interpreter_handle: Foreign | Undefined;
@@ -947,9 +1041,9 @@ const kAllowLargeObjectAllocation: constexpr AllocationFlags
generates 'CodeStubAssembler::kAllowLargeObjectAllocation';
const kWithSlackTracking: constexpr SlackTrackingMode
- generates 'SlackTrackingMode::kWithSlackTracking';
+ generates 'CodeStubAssembler::SlackTrackingMode::kWithSlackTracking';
const kNoSlackTracking: constexpr SlackTrackingMode
- generates 'SlackTrackingMode::kNoSlackTracking';
+ generates 'CodeStubAssembler::SlackTrackingMode::kNoSlackTracking';
const kFixedDoubleArrays: constexpr ExtractFixedArrayFlags
generates 'CodeStubAssembler::ExtractFixedArrayFlag::kFixedDoubleArrays';
@@ -977,6 +1071,8 @@ const kCalledNonCallable: constexpr MessageTemplate
generates 'MessageTemplate::kCalledNonCallable';
const kCalledOnNullOrUndefined: constexpr MessageTemplate
generates 'MessageTemplate::kCalledOnNullOrUndefined';
+const kProtoObjectOrNull: constexpr MessageTemplate
+ generates 'MessageTemplate::kProtoObjectOrNull';
const kInvalidOffset: constexpr MessageTemplate
generates 'MessageTemplate::kInvalidOffset';
const kInvalidTypedArrayLength: constexpr MessageTemplate
@@ -1003,13 +1099,17 @@ const kSymbolToString: constexpr MessageTemplate
generates 'MessageTemplate::kSymbolToString';
const kPropertyNotFunction: constexpr MessageTemplate
generates 'MessageTemplate::kPropertyNotFunction';
+const kBigIntMaxLength: constexpr intptr
+ generates 'BigInt::kMaxLength';
+const kBigIntTooBig: constexpr MessageTemplate
+ generates 'MessageTemplate::kBigIntTooBig';
const kMaxArrayIndex:
constexpr uint32 generates 'JSArray::kMaxArrayIndex';
const kArrayBufferMaxByteLength:
constexpr uintptr generates 'JSArrayBuffer::kMaxByteLength';
-const V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP:
- constexpr int31 generates 'V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP';
+const kMaxTypedArrayInHeap:
+ constexpr int31 generates 'JSTypedArray::kMaxSizeInHeap';
const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger';
const kSmiMaxValue: constexpr uintptr generates 'kSmiMaxValue';
const kSmiMax: uintptr = kSmiMaxValue;
@@ -1054,7 +1154,13 @@ const kStrictReadOnlyProperty: constexpr MessageTemplate
const kString: constexpr PrimitiveType
generates 'PrimitiveType::kString';
-type Hole extends Oddball;
+const kExternalPointerForOnHeapArray: constexpr RawPtr
+ generates 'JSTypedArray::ExternalPointerForOnHeapArray()';
+
+const kNameDictionaryInitialCapacity:
+ constexpr int32 generates 'NameDictionary::kInitialCapacity';
+
+type TheHole extends Oddball;
type Null extends Oddball;
type Undefined extends Oddball;
type True extends Oddball;
@@ -1064,7 +1170,7 @@ type Boolean = True | False;
type NumberOrUndefined = Number | Undefined;
-extern macro TheHoleConstant(): Hole;
+extern macro TheHoleConstant(): TheHole;
extern macro NullConstant(): Null;
extern macro UndefinedConstant(): Undefined;
extern macro TrueConstant(): True;
@@ -1075,7 +1181,7 @@ extern macro EmptyStringConstant(): EmptyString;
extern macro LengthStringConstant(): String;
extern macro NanConstant(): NaN;
-const Hole: Hole = TheHoleConstant();
+const TheHole: TheHole = TheHoleConstant();
const Null: Null = NullConstant();
const Undefined: Undefined = UndefinedConstant();
const True: True = TrueConstant();
@@ -1090,11 +1196,6 @@ const false: constexpr bool generates 'false';
const kStrict: constexpr LanguageMode generates 'LanguageMode::kStrict';
const kSloppy: constexpr LanguageMode generates 'LanguageMode::kSloppy';
-const SMI_PARAMETERS: constexpr ParameterMode
- generates 'CodeStubAssembler::SMI_PARAMETERS';
-const INTPTR_PARAMETERS: constexpr ParameterMode
- generates 'CodeStubAssembler::INTPTR_PARAMETERS';
-
const SKIP_WRITE_BARRIER:
constexpr WriteBarrierMode generates 'SKIP_WRITE_BARRIER';
const UNSAFE_SKIP_WRITE_BARRIER:
@@ -1107,7 +1208,7 @@ extern class AsyncGeneratorRequest extends Struct {
promise: JSPromise;
}
-extern class ModuleInfoEntry extends Struct {
+extern class SourceTextModuleInfoEntry extends Struct {
export_name: String | Undefined;
local_name: String | Undefined;
import_name: String | Undefined;
@@ -1134,7 +1235,7 @@ extern class PromiseReaction extends Struct {
extern class PromiseReactionJobTask extends Microtask {
argument: Object;
context: Context;
- @noVerifier handler: Callable | Undefined;
+ handler: Callable | Undefined;
promise_or_capability: JSPromise | PromiseCapability | Undefined;
}
@@ -1155,22 +1256,8 @@ extern class JSRegExp extends JSObject {
flags: Smi | Undefined;
}
-@noVerifier
-extern class JSIteratorResult extends JSObject {
- value: Object;
- done: Boolean;
-}
-
-macro NewJSIteratorResult(implicit context: Context)(
- value: Object, done: Boolean): JSIteratorResult {
- return new JSIteratorResult{
- map: GetIteratorResultMap(),
- properties_or_hash: kEmptyFixedArray,
- elements: kEmptyFixedArray,
- value,
- done
- };
-}
+extern transitioning macro AllocateJSIteratorResult(implicit context: Context)(
+ Object, Boolean): JSObject;
// Note: Although a condition for a FastJSRegExp is having a positive smi
// lastIndex (see RegExpBuiltinsAssembler::BranchIfFastRegExp), it is possible
@@ -1230,9 +1317,9 @@ extern class AccessorInfo extends Struct {
name: Object;
flags: Smi;
expected_receiver_type: Object;
- @noVerifier setter: Foreign | Zero;
- @noVerifier getter: Foreign | Zero;
- @noVerifier js_getter: Foreign | Zero;
+ setter: NonNullForeign | Zero;
+ getter: NonNullForeign | Zero;
+ js_getter: NonNullForeign | Zero;
data: Object;
}
@@ -1277,7 +1364,7 @@ extern class FeedbackCell extends Struct {
type AllocationSite extends Struct;
extern class AllocationMemento extends Struct {
- @noVerifier allocation_site: AllocationSite;
+ allocation_site: AllocationSite;
}
extern class WasmModuleObject extends JSObject {
@@ -1303,8 +1390,8 @@ extern class WasmMemoryObject extends JSObject {
}
extern class WasmGlobalObject extends JSObject {
- untagged_buffer: JSArrayBuffer;
- tagged_buffer: FixedArray;
+ untagged_buffer: JSArrayBuffer | Undefined;
+ tagged_buffer: FixedArray | Undefined;
offset: Smi;
flags: Smi;
}
@@ -1314,10 +1401,6 @@ extern class WasmExceptionObject extends JSObject {
exception_tag: HeapObject;
}
-@noVerifier
-extern class WasmExceptionPackage extends JSReceiver {
-}
-
type WasmExportedFunction extends JSFunction;
extern class AsmWasmData extends Struct {
@@ -1327,6 +1410,46 @@ extern class AsmWasmData extends Struct {
uses_bitset: HeapNumber;
}
+extern class JSFinalizationGroup extends JSObject {
+ native_context: NativeContext;
+ cleanup: Object;
+ active_cells: Undefined | WeakCell;
+ cleared_cells: Undefined | WeakCell;
+ key_map: Object;
+ next: Undefined | JSFinalizationGroup;
+ flags: Smi;
+}
+
+extern class JSFinalizationGroupCleanupIterator extends JSObject {
+ finalization_group: JSFinalizationGroup;
+}
+
+extern class WeakCell extends HeapObject {
+ finalization_group: Undefined | JSFinalizationGroup;
+ target: Undefined | JSReceiver;
+ holdings: Object;
+ prev: Undefined | WeakCell;
+ next: Undefined | WeakCell;
+ key: Object;
+ key_list_prev: Undefined | WeakCell;
+ key_list_next: Undefined | WeakCell;
+}
+
+extern class JSWeakRef extends JSObject { target: Undefined | JSReceiver; }
+
+extern class BytecodeArray extends FixedArrayBase {
+ // TODO(v8:8983): bytecode array object sizes vary based on their contents.
+ constant_pool: FixedArray;
+ handler_table: ByteArray;
+ source_position_table: Undefined | ByteArray |
+ SourcePositionTableWithFrameCache;
+ frame_size: int32;
+ parameter_size: int32;
+ incoming_new_target_or_generator_register: int32;
+ osr_nesting_level: int8;
+ bytecode_age: int8;
+}
+
extern macro Is64(): constexpr bool;
extern macro SelectBooleanConstant(bool): Boolean;
@@ -1358,7 +1481,7 @@ extern transitioning builtin SetProperty(implicit context: Context)(
extern transitioning builtin SetPropertyInLiteral(implicit context: Context)(
Object, Object, Object);
extern transitioning builtin DeleteProperty(implicit context: Context)(
- Object, Object, LanguageMode);
+ Object, Object, LanguageMode): Object;
extern transitioning builtin HasProperty(implicit context: Context)(
Object, Object): Boolean;
extern transitioning macro HasProperty_Inline(implicit context: Context)(
@@ -1403,6 +1526,10 @@ extern macro ConstructWithTarget(implicit context: Context)(
extern macro SpeciesConstructor(implicit context: Context)(
Object, JSReceiver): JSReceiver;
+extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool;
+extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32):
+ NameDictionary;
+
extern builtin ToObject(Context, Object): JSReceiver;
extern macro ToObject_Inline(Context, Object): JSReceiver;
extern macro IsNullOrUndefined(Object): bool;
@@ -1598,6 +1725,7 @@ extern operator '==' macro Word32Equal(bool, bool): bool;
extern operator '!=' macro Word32NotEqual(bool, bool): bool;
extern operator '+' macro Float64Add(float64, float64): float64;
+extern operator '-' macro Float64Sub(float64, float64): float64;
extern operator '+' macro NumberAdd(Number, Number): Number;
extern operator '-' macro NumberSub(Number, Number): Number;
@@ -1650,6 +1778,8 @@ extern macro TaggedIsNotSmi(Object): bool;
extern macro TaggedIsPositiveSmi(Object): bool;
extern macro IsValidPositiveSmi(intptr): bool;
+extern macro IsInteger(HeapNumber): bool;
+
extern macro HeapObjectToJSDataView(HeapObject): JSDataView
labels CastError;
extern macro HeapObjectToJSProxy(HeapObject): JSProxy
@@ -1713,7 +1843,7 @@ macro Cast<A: type>(o: HeapObject): A
labels CastError;
Cast<HeapObject>(o: HeapObject): HeapObject
- labels CastError {
+labels _CastError {
return o;
}
@@ -1837,6 +1967,11 @@ Cast<HeapNumber>(o: HeapObject): HeapNumber
goto CastError;
}
+Cast<BigInt>(o: HeapObject): BigInt labels CastError {
+ if (IsBigInt(o)) return %RawDownCast<BigInt>(o);
+ goto CastError;
+}
+
Cast<JSRegExp>(o: HeapObject): JSRegExp
labels CastError {
if (IsJSRegExp(o)) return %RawDownCast<JSRegExp>(o);
@@ -1849,9 +1984,9 @@ Cast<Map>(implicit context: Context)(o: HeapObject): Map
goto CastError;
}
-Cast<JSValue>(o: HeapObject): JSValue
+Cast<JSPrimitiveWrapper>(o: HeapObject): JSPrimitiveWrapper
labels CastError {
- if (IsJSValue(o)) return %RawDownCast<JSValue>(o);
+ if (IsJSPrimitiveWrapper(o)) return %RawDownCast<JSPrimitiveWrapper>(o);
goto CastError;
}
@@ -1915,24 +2050,24 @@ Cast<FastJSArrayForCopy>(implicit context: Context)(o: HeapObject):
FastJSArrayForCopy
labels CastError {
if (IsArraySpeciesProtectorCellInvalid()) goto CastError;
- const a: FastJSArray = Cast<FastJSArray>(o) otherwise CastError;
- return %RawDownCast<FastJSArrayForCopy>(o);
+ const a = Cast<FastJSArray>(o) otherwise CastError;
+ return %RawDownCast<FastJSArrayForCopy>(a);
}
Cast<FastJSArrayWithNoCustomIteration>(implicit context: Context)(
o: HeapObject): FastJSArrayWithNoCustomIteration
labels CastError {
if (IsArrayIteratorProtectorCellInvalid()) goto CastError;
- const a: FastJSArray = Cast<FastJSArray>(o) otherwise CastError;
- return %RawDownCast<FastJSArrayWithNoCustomIteration>(o);
+ const a = Cast<FastJSArray>(o) otherwise CastError;
+ return %RawDownCast<FastJSArrayWithNoCustomIteration>(a);
}
Cast<FastJSArrayForReadWithNoCustomIteration>(implicit context: Context)(
o: HeapObject): FastJSArrayForReadWithNoCustomIteration
labels CastError {
if (IsArrayIteratorProtectorCellInvalid()) goto CastError;
- const a: FastJSArrayForRead = Cast<FastJSArrayForRead>(o) otherwise CastError;
- return %RawDownCast<FastJSArrayForReadWithNoCustomIteration>(o);
+ const a = Cast<FastJSArrayForRead>(o) otherwise CastError;
+ return %RawDownCast<FastJSArrayForReadWithNoCustomIteration>(a);
}
Cast<JSReceiver>(implicit context: Context)(o: HeapObject): JSReceiver
@@ -1990,7 +2125,7 @@ extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
extern macro LoadNativeContext(Context): NativeContext;
extern macro TruncateFloat64ToFloat32(float64): float32;
-extern macro TruncateHeapNumberValueToWord32(Number): int32;
+extern macro TruncateHeapNumberValueToWord32(HeapNumber): int32;
extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map;
extern macro LoadJSArrayElementsMap(ElementsKind, Context): Map;
extern macro ChangeNonnegativeNumberToUintPtr(Number): uintptr;
@@ -2007,13 +2142,14 @@ extern macro Float64Constant(constexpr float64): float64;
extern macro SmiConstant(constexpr int31): Smi;
extern macro SmiConstant(constexpr Smi): Smi;
extern macro SmiConstant(constexpr MessageTemplate): Smi;
+extern macro SmiConstant(constexpr LanguageMode): Smi;
extern macro BoolConstant(constexpr bool): bool;
extern macro StringConstant(constexpr string): String;
-extern macro LanguageModeConstant(constexpr LanguageMode): LanguageMode;
extern macro Int32Constant(constexpr ElementsKind): ElementsKind;
extern macro IntPtrConstant(constexpr NativeContextSlot): NativeContextSlot;
extern macro IntPtrConstant(constexpr ContextSlot): ContextSlot;
extern macro IntPtrConstant(constexpr intptr): intptr;
+extern macro PointerConstant(constexpr RawPtr): RawPtr;
extern macro SingleCharacterStringConstant(constexpr string): String;
extern macro BitcastWordToTaggedSigned(intptr): Smi;
@@ -2126,6 +2262,9 @@ Convert<Number, int32>(i: int32): Number {
Convert<intptr, int32>(i: int32): intptr {
return ChangeInt32ToIntPtr(i);
}
+Convert<intptr, uint32>(i: uint32): intptr {
+ return Signed(ChangeUint32ToWord(i));
+}
Convert<Smi, int32>(i: int32): Smi {
return SmiFromInt32(i);
}
@@ -2333,10 +2472,6 @@ extern operator '.floats[]=' macro StoreFixedDoubleArrayElement(
FixedDoubleArray, intptr, float64): void;
extern operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
FixedDoubleArray, Smi, float64): void;
-operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
- a: FixedDoubleArray, i: Smi, n: Number): void {
- StoreFixedDoubleArrayElementSmi(a, i, Convert<float64>(n));
-}
operator '[]=' macro StoreFixedDoubleArrayDirect(
a: FixedDoubleArray, i: Smi, v: Number) {
a.floats[i] = Convert<float64>(v);
@@ -2418,7 +2553,7 @@ extern macro AllocateJSArray(constexpr ElementsKind, Map, Smi, Smi): JSArray;
extern macro AllocateJSArray(Map, FixedArrayBase, Smi): JSArray;
extern macro AllocateJSObjectFromMap(Map): JSObject;
extern macro AllocateJSObjectFromMap(
- Map, FixedArray, FixedArray, constexpr AllocationFlags,
+ Map, FixedArray | PropertyArray, FixedArray, constexpr AllocationFlags,
constexpr SlackTrackingMode): JSObject;
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64
@@ -2531,10 +2666,10 @@ LoadElementNoHole<FixedArray>(implicit context: Context)(
a: JSArray, index: Smi): Object
labels IfHole {
try {
- let elements: FixedArray =
+ const elements: FixedArray =
Cast<FixedArray>(a.elements) otherwise Unexpected;
- let e: Object = elements.objects[index];
- if (e == Hole) {
+ const e: Object = elements.objects[index];
+ if (e == TheHole) {
goto IfHole;
}
return e;
@@ -2548,9 +2683,10 @@ LoadElementNoHole<FixedDoubleArray>(implicit context: Context)(
a: JSArray, index: Smi): Object
labels IfHole {
try {
- let elements: FixedDoubleArray =
+ const elements: FixedDoubleArray =
Cast<FixedDoubleArray>(a.elements) otherwise Unexpected;
- let e: float64 = LoadDoubleWithHoleCheck(elements, index) otherwise IfHole;
+ const e: float64 =
+ LoadDoubleWithHoleCheck(elements, index) otherwise IfHole;
return AllocateHeapNumberWithValue(e);
}
label Unexpected {
@@ -2594,7 +2730,7 @@ struct FastJSArrayWitness {
} else {
const elements = Cast<FixedArray>(this.unstable.elements)
otherwise unreachable;
- StoreFixedArrayElement(elements, k, Hole);
+ StoreFixedArrayElement(elements, k, TheHole);
}
}
@@ -2638,12 +2774,12 @@ struct FastJSArrayWitness {
MoveElements(dst: intptr, src: intptr, length: intptr) {
assert(this.arrayIsPushable);
if (this.hasDoubles) {
- let elements: FixedDoubleArray =
+ const elements: FixedDoubleArray =
Cast<FixedDoubleArray>(this.unstable.elements)
otherwise unreachable;
TorqueMoveElements(elements, dst, src, length);
} else {
- let elements: FixedArray = Cast<FixedArray>(this.unstable.elements)
+ const elements: FixedArray = Cast<FixedArray>(this.unstable.elements)
otherwise unreachable;
if (this.hasSmis) {
TorqueMoveElementsSmi(elements, dst, src, length);
@@ -2662,17 +2798,62 @@ struct FastJSArrayWitness {
}
macro NewFastJSArrayWitness(array: FastJSArray): FastJSArrayWitness {
- let kind = array.map.elements_kind;
+ const kind = array.map.elements_kind;
return FastJSArrayWitness{
stable: array,
unstable: array,
map: array.map,
- hasDoubles: !IsElementsKindLessThanOrEqual(kind, HOLEY_ELEMENTS),
+ hasDoubles: IsDoubleElementsKind(kind),
hasSmis: IsElementsKindLessThanOrEqual(kind, HOLEY_SMI_ELEMENTS),
arrayIsPushable: false
};
}
+struct FastJSArrayForReadWitness {
+ Get(): FastJSArrayForRead {
+ return this.unstable;
+ }
+
+ Recheck() labels CastError {
+ if (this.stable.map != this.map) goto CastError;
+ // We don't need to check elements kind or whether the prototype
+ // has changed away from the default JSArray prototype, because
+ // if the map remains the same then those properties hold.
+ //
+ // However, we have to make sure there are no elements in the
+ // prototype chain.
+ if (IsNoElementsProtectorCellInvalid()) goto CastError;
+ this.unstable = %RawDownCast<FastJSArrayForRead>(this.stable);
+ }
+
+ LoadElementNoHole(implicit context: Context)(k: Smi): Object
+ labels FoundHole {
+ if (this.hasDoubles) {
+ return LoadElementNoHole<FixedDoubleArray>(this.unstable, k)
+ otherwise FoundHole;
+ } else {
+ return LoadElementNoHole<FixedArray>(this.unstable, k)
+ otherwise FoundHole;
+ }
+ }
+
+ const stable: JSArray;
+ unstable: FastJSArrayForRead;
+ const map: Map;
+ const hasDoubles: bool;
+}
+
+macro NewFastJSArrayForReadWitness(array: FastJSArrayForRead):
+ FastJSArrayForReadWitness {
+ const kind = array.map.elements_kind;
+ return FastJSArrayForReadWitness{
+ stable: array,
+ unstable: array,
+ map: array.map,
+ hasDoubles: IsDoubleElementsKind(kind)
+ };
+}
+
extern macro TransitionElementsKind(
JSObject, Map, constexpr ElementsKind,
constexpr ElementsKind): void labels Bailout;
@@ -2693,6 +2874,7 @@ extern macro IsJSReceiver(HeapObject): bool;
extern macro TaggedIsCallable(Object): bool;
extern macro IsDetachedBuffer(JSArrayBuffer): bool;
extern macro IsHeapNumber(HeapObject): bool;
+extern macro IsBigInt(HeapObject): bool;
extern macro IsFixedArray(HeapObject): bool;
extern macro IsName(HeapObject): bool;
extern macro IsPrivateSymbol(HeapObject): bool;
@@ -2702,7 +2884,7 @@ extern macro IsOddball(HeapObject): bool;
extern macro IsSymbol(HeapObject): bool;
extern macro IsJSArrayMap(Map): bool;
extern macro IsExtensibleMap(Map): bool;
-extern macro IsJSValue(HeapObject): bool;
+extern macro IsJSPrimitiveWrapper(HeapObject): bool;
extern macro IsCustomElementsReceiverInstanceType(int32): bool;
extern macro Typeof(Object): Object;
@@ -2713,7 +2895,7 @@ macro NumberIsNaN(number: Number): bool {
return false;
}
case (hn: HeapNumber): {
- let value: float64 = Convert<float64>(hn);
+ const value: float64 = Convert<float64>(hn);
return value != value;
}
}
@@ -2722,6 +2904,8 @@ macro NumberIsNaN(number: Number): bool {
extern macro GotoIfForceSlowPath() labels Taken;
extern macro BranchIfToBooleanIsTrue(Object): never
labels Taken, NotTaken;
+extern macro BranchIfToBooleanIsFalse(Object): never
+ labels Taken, NotTaken;
macro ToBoolean(obj: Object): bool {
if (BranchIfToBooleanIsTrue(obj)) {
@@ -2731,13 +2915,24 @@ macro ToBoolean(obj: Object): bool {
}
}
+@export
+macro RequireObjectCoercible(implicit context: Context)(
+ value: Object, name: constexpr string): Object {
+ if (IsNullOrUndefined(value)) {
+ ThrowTypeError(kCalledOnNullOrUndefined, name);
+ }
+ return value;
+}
+
+extern macro BranchIfSameValue(Object, Object): never labels Taken, NotTaken;
+
transitioning macro ToIndex(input: Object, context: Context): Number
labels RangeError {
if (input == Undefined) {
return 0;
}
- let value: Number = ToInteger_Inline(context, input, kTruncateMinusZero);
+ const value: Number = ToInteger_Inline(context, input, kTruncateMinusZero);
if (value < 0 || value > kMaxSafeInteger) {
goto RangeError;
}
@@ -2824,19 +3019,6 @@ macro BranchIfFastJSArrayForRead(o: Object, context: Context):
BranchIf<FastJSArrayForRead>(o) otherwise True, False;
}
-macro BranchIfNotFastJSArray(o: Object, context: Context): never
- labels True, False {
- BranchIfNot<FastJSArray>(o) otherwise True, False;
-}
-
-macro BranchIfFastJSArrayForCopy(o: Object, context: Context): never
- labels True, False {
- // Long-term, it's likely not a good idea to have this slow-path test here,
- // since it fundamentally breaks the type system.
- GotoIfForceSlowPath() otherwise False;
- BranchIf<FastJSArrayForCopy>(o) otherwise True, False;
-}
-
@export
macro IsFastJSArrayWithNoCustomIteration(context: Context, o: Object): bool {
return Is<FastJSArrayWithNoCustomIteration>(o);
@@ -2859,7 +3041,7 @@ namespace runtime {
transitioning builtin FastCreateDataProperty(implicit context: Context)(
receiver: JSReceiver, key: Object, value: Object): Object {
try {
- let array = Cast<FastJSArray>(receiver) otherwise Slow;
+ const array = Cast<FastJSArray>(receiver) otherwise Slow;
const index: Smi = Cast<Smi>(key) otherwise goto Slow;
if (index < 0 || index > array.length) goto Slow;
array::EnsureWriteableFastElements(array);
@@ -2929,3 +3111,46 @@ transitioning macro ToStringImpl(context: Context, o: Object): String {
}
unreachable;
}
+
+macro VerifiedUnreachable(): never {
+ StaticAssert(false);
+ unreachable;
+}
+
+macro Float64IsSomeInfinity(value: float64): bool {
+ if (value == V8_INFINITY) {
+ return true;
+ }
+ return value == (Convert<float64>(0) - V8_INFINITY);
+}
+
+@export
+macro IsIntegerOrSomeInfinity(o: Object): bool {
+ typeswitch (o) {
+ case (Smi): {
+ return true;
+ }
+ case (hn: HeapNumber): {
+ if (Float64IsSomeInfinity(Convert<float64>(hn))) {
+ return true;
+ }
+ return IsInteger(hn);
+ }
+ case (Object): {
+ return false;
+ }
+ }
+}
+
+builtin CheckNumberInRange(implicit context: Context)(
+ value: Number, min: Number, max: Number): Undefined {
+ if (IsIntegerOrSomeInfinity(value) && min <= value && value <= max) {
+ return Undefined;
+ } else {
+ Print('Range type assertion failed! (value/min/max)');
+ Print(value);
+ Print(min);
+ Print(max);
+ unreachable;
+ }
+}
diff --git a/deps/v8/src/builtins/bigint.tq b/deps/v8/src/builtins/bigint.tq
new file mode 100644
index 0000000000..a1b1cb6780
--- /dev/null
+++ b/deps/v8/src/builtins/bigint.tq
@@ -0,0 +1,206 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-bigint-gen.h'
+
+// TODO(nicohartmann): Discuss whether types used by multiple builtins should be
+// in the global namespace.
+@noVerifier
+extern class BigIntBase extends HeapObject generates 'TNode<BigInt>' {
+}
+
+type BigInt extends BigIntBase;
+
+@noVerifier
+@hasSameInstanceTypeAsParent
+extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>' {
+}
+
+Convert<BigInt, MutableBigInt>(i: MutableBigInt): BigInt {
+ assert(bigint::IsCanonicalized(i));
+ return %RawDownCast<BigInt>(Convert<BigIntBase>(i));
+}
+
+namespace bigint {
+
+ const kPositiveSign: uint32 = 0;
+ const kNegativeSign: uint32 = 1;
+
+ extern macro BigIntBuiltinsAssembler::CppAbsoluteAddAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+ extern macro BigIntBuiltinsAssembler::CppAbsoluteSubAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+ extern macro BigIntBuiltinsAssembler::CppAbsoluteCompare(
+ BigIntBase, BigIntBase): int32;
+
+ extern macro BigIntBuiltinsAssembler::ReadBigIntSign(BigIntBase): uint32;
+ extern macro BigIntBuiltinsAssembler::ReadBigIntLength(BigIntBase): intptr;
+ extern macro BigIntBuiltinsAssembler::WriteBigIntSignAndLength(
+ MutableBigInt, uint32, intptr): void;
+
+ extern macro CodeStubAssembler::AllocateBigInt(intptr): MutableBigInt;
+ extern macro CodeStubAssembler::StoreBigIntDigit(
+ MutableBigInt, intptr, uintptr): void;
+ extern macro CodeStubAssembler::LoadBigIntDigit(BigIntBase, intptr): uintptr;
+
+ @export // Silence unused warning.
+ // TODO(szuend): Remove @export once macros that are only used in
+ // asserts are no longer detected as unused.
+ macro IsCanonicalized(bigint: BigIntBase): bool {
+ const length = ReadBigIntLength(bigint);
+
+ if (length == 0) {
+ return ReadBigIntSign(bigint) == kPositiveSign;
+ }
+
+ return LoadBigIntDigit(bigint, length - 1) != 0;
+ }
+
+ macro InvertSign(sign: uint32): uint32 {
+ return sign == kPositiveSign ? kNegativeSign : kPositiveSign;
+ }
+
+ macro AllocateEmptyBigIntNoThrow(implicit context: Context)(
+ sign: uint32, length: intptr): MutableBigInt labels BigIntTooBig {
+ if (length > kBigIntMaxLength) {
+ goto BigIntTooBig;
+ }
+ const result: MutableBigInt = AllocateBigInt(length);
+
+ WriteBigIntSignAndLength(result, sign, length);
+ return result;
+ }
+
+ macro AllocateEmptyBigInt(implicit context: Context)(
+ sign: uint32, length: intptr): MutableBigInt {
+ try {
+ return AllocateEmptyBigIntNoThrow(sign, length) otherwise BigIntTooBig;
+ }
+ label BigIntTooBig {
+ ThrowRangeError(kBigIntTooBig);
+ }
+ }
+
+ macro MutableBigIntAbsoluteCompare(x: BigIntBase, y: BigIntBase): int32 {
+ return CppAbsoluteCompare(x, y);
+ }
+
+ macro MutableBigIntAbsoluteSub(implicit context: Context)(
+ x: BigInt, y: BigInt, resultSign: uint32): BigInt {
+ const xlength = ReadBigIntLength(x);
+ const ylength = ReadBigIntLength(y);
+ const xsign = ReadBigIntSign(x);
+
+ assert(MutableBigIntAbsoluteCompare(x, y) >= 0);
+ if (xlength == 0) {
+ assert(ylength == 0);
+ return x;
+ }
+
+ if (ylength == 0) {
+ return resultSign == xsign ? x : BigIntUnaryMinus(x);
+ }
+
+ const result = AllocateEmptyBigInt(resultSign, xlength);
+ CppAbsoluteSubAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+ }
+
+ macro MutableBigIntAbsoluteAdd(implicit context: Context)(
+ xBigint: BigInt, yBigint: BigInt,
+ resultSign: uint32): BigInt labels BigIntTooBig {
+ let xlength = ReadBigIntLength(xBigint);
+ let ylength = ReadBigIntLength(yBigint);
+
+ let x = xBigint;
+ let y = yBigint;
+ if (xlength < ylength) {
+ // Swap x and y so that x is longer.
+ x = yBigint;
+ y = xBigint;
+ const tempLength = xlength;
+ xlength = ylength;
+ ylength = tempLength;
+ }
+
+ // case: 0n + 0n
+ if (xlength == 0) {
+ assert(ylength == 0);
+ return x;
+ }
+
+ // case: x + 0n
+ if (ylength == 0) {
+ return resultSign == ReadBigIntSign(x) ? x : BigIntUnaryMinus(x);
+ }
+
+ // case: x + y
+ const result = AllocateEmptyBigIntNoThrow(resultSign, xlength + 1)
+ otherwise BigIntTooBig;
+ CppAbsoluteAddAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+ }
+
+ macro BigIntAddImpl(implicit context: Context)(x: BigInt, y: BigInt): BigInt
+ labels BigIntTooBig {
+ const xsign = ReadBigIntSign(x);
+ const ysign = ReadBigIntSign(y);
+ if (xsign == ysign) {
+ // x + y == x + y
+ // -x + -y == -(x + y)
+ return MutableBigIntAbsoluteAdd(x, y, xsign) otherwise BigIntTooBig;
+ }
+
+ // x + -y == x - y == -(y - x)
+ // -x + y == y - x == -(x - y)
+ if (MutableBigIntAbsoluteCompare(x, y) >= 0) {
+ return MutableBigIntAbsoluteSub(x, y, xsign);
+ }
+ return MutableBigIntAbsoluteSub(y, x, InvertSign(xsign));
+ }
+
+ builtin BigIntAddNoThrow(implicit context: Context)(x: BigInt, y: BigInt):
+ Numeric {
+ try {
+ return BigIntAddImpl(x, y) otherwise BigIntTooBig;
+ }
+ label BigIntTooBig {
+      // Smi sentinel is used to signal BigIntTooBig exception.
+ return Convert<Smi>(0);
+ }
+ }
+
+ builtin BigIntAdd(implicit context: Context)(xNum: Numeric, yNum: Numeric):
+ BigInt {
+ try {
+ const x = Cast<BigInt>(xNum) otherwise MixedTypes;
+ const y = Cast<BigInt>(yNum) otherwise MixedTypes;
+
+ return BigIntAddImpl(x, y) otherwise BigIntTooBig;
+ }
+ label MixedTypes {
+ ThrowTypeError(kBigIntMixedTypes);
+ }
+ label BigIntTooBig {
+ ThrowRangeError(kBigIntTooBig);
+ }
+ }
+
+ builtin BigIntUnaryMinus(implicit context: Context)(bigint: BigInt): BigInt {
+ const length = ReadBigIntLength(bigint);
+
+ // There is no -0n.
+ if (length == 0) {
+ return bigint;
+ }
+
+ const result =
+ AllocateEmptyBigInt(InvertSign(ReadBigIntSign(bigint)), length);
+ for (let i: intptr = 0; i < length; ++i) {
+ StoreBigIntDigit(result, i, LoadBigIntDigit(bigint, i));
+ }
+ return Convert<BigInt>(result);
+ }
+
+} // namespace bigint
diff --git a/deps/v8/src/builtins/boolean.tq b/deps/v8/src/builtins/boolean.tq
index a41ef76d21..25f9ebd396 100644
--- a/deps/v8/src/builtins/boolean.tq
+++ b/deps/v8/src/builtins/boolean.tq
@@ -3,39 +3,20 @@
// found in the LICENSE file.
namespace boolean {
- const kNameDictionaryInitialCapacity:
- constexpr int32 generates 'NameDictionary::kInitialCapacity';
-
- extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool;
- extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32):
- NameDictionary;
-
- // TODO(v8:9120): This is a workaround to get access to target and new.target
- // in javascript builtins. Requires cleanup once this is fully supported by
- // torque.
- const NEW_TARGET_INDEX:
- constexpr int32 generates 'Descriptor::kJSNewTarget';
- const TARGET_INDEX: constexpr int32 generates 'Descriptor::kJSTarget';
- extern macro Parameter(constexpr int32): Object;
-
javascript builtin
- BooleanConstructor(context: Context, receiver: Object, ...arguments): Object {
+ BooleanConstructor(
+ js-implicit context: Context, receiver: Object, newTarget: Object,
+ target: JSFunction)(...arguments): Object {
const value = SelectBooleanConstant(ToBoolean(arguments[0]));
- const newTarget = Parameter(NEW_TARGET_INDEX);
if (newTarget == Undefined) {
return value;
}
- const target = UnsafeCast<JSFunction>(Parameter(TARGET_INDEX));
const map = GetDerivedMap(target, UnsafeCast<JSReceiver>(newTarget));
- let properties = kEmptyFixedArray;
- if (IsDictionaryMap(map)) {
- properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
- }
- const obj = UnsafeCast<JSValue>(AllocateJSObjectFromMap(
- map, properties, kEmptyFixedArray, kNone, kWithSlackTracking));
+ const obj =
+ UnsafeCast<JSPrimitiveWrapper>(AllocateFastOrSlowJSObjectFromMap(map));
obj.value = value;
return obj;
}
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index 7ee879ab51..0c30e52154 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -32,14 +32,16 @@ JSReceiver GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo info,
JSObject js_obj_receiver = JSObject::cast(receiver);
FunctionTemplateInfo signature = FunctionTemplateInfo::cast(recv_type);
- // Check the receiver. Fast path for receivers with no hidden prototypes.
+ // Check the receiver.
if (signature.IsTemplateFor(js_obj_receiver)) return receiver;
- if (!js_obj_receiver.map().has_hidden_prototype()) return JSReceiver();
- for (PrototypeIterator iter(isolate, js_obj_receiver, kStartAtPrototype,
- PrototypeIterator::END_AT_NON_HIDDEN);
- !iter.IsAtEnd(); iter.Advance()) {
- JSObject current = iter.GetCurrent<JSObject>();
- if (signature.IsTemplateFor(current)) return current;
+
+ // The JSGlobalProxy might have a hidden prototype.
+ if (V8_UNLIKELY(js_obj_receiver.IsJSGlobalProxy())) {
+ HeapObject prototype = js_obj_receiver.map().prototype();
+ if (!prototype.IsNull(isolate)) {
+ JSObject js_obj_prototype = JSObject::cast(prototype);
+ if (signature.IsTemplateFor(js_obj_prototype)) return js_obj_prototype;
+ }
}
return JSReceiver();
}
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 6cc9fd9623..d65d57cc79 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -266,7 +266,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
var_list1, argument_offset, mapped_offset,
[this, elements, &current_argument](Node* offset) {
Increment(&current_argument, kSystemPointerSize);
- Node* arg = LoadBufferObject(current_argument.value(), 0);
+ Node* arg = LoadBufferObject(
+ UncheckedCast<RawPtrT>(current_argument.value()), 0);
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
arg);
},
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 29bcae6feb..07f74cb429 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -227,7 +227,7 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
VariableList list({&a_, &k_, &to_}, zone());
FastLoopBody body = [&](Node* index) {
- GotoIf(IsDetachedBuffer(array_buffer), detached);
+ GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
Node* value = LoadFixedTypedArrayElementAsTagged(
data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
@@ -402,7 +402,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
TNode<Object> receiver = args.GetReceiver();
TNode<JSArray> array_receiver;
- Node* kind = nullptr;
+ TNode<Int32T> kind;
Label fast(this);
BranchIfFastJSArray(receiver, context, &fast, &runtime);
@@ -709,19 +709,19 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
iterator_assembler.GetIterator(context, items, iterator_method);
TNode<Context> native_context = LoadNativeContext(context);
- TNode<Object> fast_iterator_result_map =
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ TNode<Map> fast_iterator_result_map = CAST(
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
Goto(&loop);
BIND(&loop);
{
// Loop while iterator is not done.
- TNode<Object> next = iterator_assembler.IteratorStep(
+ TNode<JSReceiver> next = iterator_assembler.IteratorStep(
context, iterator_record, &loop_done, fast_iterator_result_map);
TVARIABLE(Object, value,
- CAST(iterator_assembler.IteratorValue(
- context, next, fast_iterator_result_map)));
+ iterator_assembler.IteratorValue(context, next,
+ fast_iterator_result_map));
// If a map_function is supplied then call it (using this_arg as
// receiver), on the value returned from the iterator. Exceptions are
@@ -2035,8 +2035,7 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument(
&normal_sequence);
{
// Make elements kind holey and update elements kind in the type info.
- var_elements_kind =
- Signed(Word32Or(var_elements_kind.value(), Int32Constant(1)));
+ var_elements_kind = Word32Or(var_elements_kind.value(), Int32Constant(1));
StoreObjectFieldNoWriteBarrier(
allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset,
SmiOr(transition_info, SmiConstant(fast_elements_kind_holey_mask)));
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index e6ab965a7e..96c10ed0fd 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -970,8 +970,9 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
}
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS: {
- DCHECK(object->IsJSValue());
- Handle<JSValue> js_value = Handle<JSValue>::cast(object);
+ DCHECK(object->IsJSPrimitiveWrapper());
+ Handle<JSPrimitiveWrapper> js_value =
+ Handle<JSPrimitiveWrapper>::cast(object);
DCHECK(js_value->value().IsString());
Handle<String> string(String::cast(js_value->value()), isolate);
uint32_t length = static_cast<uint32_t>(string->length());
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 03df1aaaad..a95365e425 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -36,6 +36,21 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
TNode<JSAsyncFunctionObject> async_function_object =
CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
+ // Push the promise for the {async_function_object} back onto the catch
+ // prediction stack to handle exceptions thrown after resuming from the
+ // await properly.
+ Label if_instrumentation(this, Label::kDeferred),
+ if_instrumentation_done(this);
+ Branch(IsDebugActive(), &if_instrumentation, &if_instrumentation_done);
+ BIND(&if_instrumentation);
+ {
+ TNode<JSPromise> promise = LoadObjectField<JSPromise>(
+ async_function_object, JSAsyncFunctionObject::kPromiseOffset);
+ CallRuntime(Runtime::kDebugAsyncFunctionResumed, context, promise);
+ Goto(&if_instrumentation_done);
+ }
+ BIND(&if_instrumentation_done);
+
// Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
// unnecessary runtime checks removed.
@@ -80,27 +95,19 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
Signed(IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
formal_parameter_count));
- // Allocate space for the promise, the async function object
- // and the register file.
- TNode<IntPtrT> size = IntPtrAdd(
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields +
- JSAsyncFunctionObject::kSize + FixedArray::kHeaderSize),
- Signed(WordShl(parameters_and_register_length,
- IntPtrConstant(kTaggedSizeLog2))));
- TNode<HeapObject> base = AllocateInNewSpace(size);
-
- // Initialize the register file.
- TNode<FixedArray> parameters_and_registers = UncheckedCast<FixedArray>(
- InnerAllocate(base, JSAsyncFunctionObject::kSize +
- JSPromise::kSizeWithEmbedderFields));
- StoreMapNoWriteBarrier(parameters_and_registers, RootIndex::kFixedArrayMap);
- StoreObjectFieldNoWriteBarrier(parameters_and_registers,
- FixedArray::kLengthOffset,
- SmiFromIntPtr(parameters_and_register_length));
+ // Allocate and initialize the register file.
+ TNode<FixedArrayBase> parameters_and_registers =
+ AllocateFixedArray(HOLEY_ELEMENTS, parameters_and_register_length,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
IntPtrConstant(0), parameters_and_register_length,
RootIndex::kUndefinedValue);
+  // Allocate space for the promise and the async function object.
+ TNode<IntPtrT> size = IntPtrConstant(JSPromise::kSizeWithEmbedderFields +
+ JSAsyncFunctionObject::kSize);
+ TNode<HeapObject> base = AllocateInNewSpace(size);
+
// Initialize the promise.
TNode<Context> native_context = LoadNativeContext(context);
TNode<JSFunction> promise_function =
diff --git a/deps/v8/src/builtins/builtins-bigint-gen.cc b/deps/v8/src/builtins/builtins-bigint-gen.cc
index 8a752f2517..d4818f0e01 100644
--- a/deps/v8/src/builtins/builtins-bigint-gen.cc
+++ b/deps/v8/src/builtins/builtins-bigint-gen.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-bigint-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
diff --git a/deps/v8/src/builtins/builtins-bigint-gen.h b/deps/v8/src/builtins/builtins-bigint-gen.h
new file mode 100644
index 0000000000..288418258b
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-bigint-gen.h
@@ -0,0 +1,80 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_BIGINT_GEN_H_
+#define V8_BUILTINS_BUILTINS_BIGINT_GEN_H_
+
+#include "src/codegen/code-stub-assembler.h"
+#include "src/objects/bigint.h"
+
+namespace v8 {
+namespace internal {
+
+class BigIntBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit BigIntBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<IntPtrT> ReadBigIntLength(TNode<BigInt> value) {
+ TNode<Word32T> bitfield = LoadBigIntBitfield(value);
+ return ChangeInt32ToIntPtr(
+ Signed(DecodeWord32<BigIntBase::LengthBits>(bitfield)));
+ }
+
+ TNode<Uint32T> ReadBigIntSign(TNode<BigInt> value) {
+ TNode<Word32T> bitfield = LoadBigIntBitfield(value);
+ return DecodeWord32<BigIntBase::SignBits>(bitfield);
+ }
+
+ void WriteBigIntSignAndLength(TNode<BigInt> bigint, TNode<Uint32T> sign,
+ TNode<IntPtrT> length) {
+ STATIC_ASSERT(BigIntBase::SignBits::kShift == 0);
+ TNode<Uint32T> bitfield = Unsigned(
+ Word32Or(Word32Shl(TruncateIntPtrToInt32(length),
+ Int32Constant(BigIntBase::LengthBits::kShift)),
+ Word32And(sign, Int32Constant(BigIntBase::SignBits::kMask))));
+ StoreBigIntBitfield(bigint, bitfield);
+ }
+
+ void CppAbsoluteAddAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<BigInt> y) {
+ TNode<ExternalReference> mutable_big_int_absolute_add_and_canonicalize =
+ ExternalConstant(
+ ExternalReference::
+ mutable_big_int_absolute_add_and_canonicalize_function());
+ CallCFunction(mutable_big_int_absolute_add_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y));
+ }
+
+ void CppAbsoluteSubAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<BigInt> y) {
+ TNode<ExternalReference> mutable_big_int_absolute_sub_and_canonicalize =
+ ExternalConstant(
+ ExternalReference::
+ mutable_big_int_absolute_sub_and_canonicalize_function());
+ CallCFunction(mutable_big_int_absolute_sub_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y));
+ }
+
+ TNode<Int32T> CppAbsoluteCompare(TNode<BigInt> x, TNode<BigInt> y) {
+ TNode<ExternalReference> mutable_big_int_absolute_compare =
+ ExternalConstant(
+ ExternalReference::mutable_big_int_absolute_compare_function());
+ TNode<Int32T> result = UncheckedCast<Int32T>(
+ CallCFunction(mutable_big_int_absolute_compare, MachineType::Int32(),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y)));
+ return result;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+#endif // V8_BUILTINS_BUILTINS_BIGINT_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index a8a847ef47..09d71a0562 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -80,10 +80,10 @@ MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
// 1. If Type(value) is BigInt, return value.
if (value->IsBigInt()) return Handle<BigInt>::cast(value);
// 2. If Type(value) is Object and value has a [[BigIntData]] internal slot:
- if (value->IsJSValue()) {
+ if (value->IsJSPrimitiveWrapper()) {
// 2a. Assert: value.[[BigIntData]] is a BigInt value.
// 2b. Return value.[[BigIntData]].
- Object data = JSValue::cast(*value).value();
+ Object data = JSPrimitiveWrapper::cast(*value).value();
if (data.IsBigInt()) return handle(BigInt::cast(data), isolate);
}
// 3. Throw a TypeError exception.
diff --git a/deps/v8/src/builtins/builtins-boolean-gen.cc b/deps/v8/src/builtins/builtins-boolean-gen.cc
index 30cf7ba0c1..74474a8918 100644
--- a/deps/v8/src/builtins/builtins-boolean-gen.cc
+++ b/deps/v8/src/builtins/builtins-boolean-gen.cc
@@ -15,22 +15,23 @@ namespace internal {
// ES6 #sec-boolean.prototype.tostring
TF_BUILTIN(BooleanPrototypeToString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* value = ToThisValue(context, receiver, PrimitiveType::kBoolean,
- "Boolean.prototype.toString");
- Node* result = LoadObjectField(value, Oddball::kToStringOffset);
+ TNode<Oddball> value =
+ CAST(ToThisValue(context, receiver, PrimitiveType::kBoolean,
+ "Boolean.prototype.toString"));
+ TNode<String> result = CAST(LoadObjectField(value, Oddball::kToStringOffset));
Return(result);
}
// ES6 #sec-boolean.prototype.valueof
TF_BUILTIN(BooleanPrototypeValueOf, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* result = ToThisValue(context, receiver, PrimitiveType::kBoolean,
- "Boolean.prototype.valueOf");
+ TNode<Oddball> result = CAST(ToThisValue(
+ context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.valueOf"));
Return(result);
}
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 05142a8f07..deb91dee24 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -475,14 +475,13 @@ TNode<JSReceiver> CallOrConstructBuiltinsAssembler::GetCompatibleReceiver(
BIND(&holder_next);
{
- // Continue with the hidden prototype of the {holder} if it
- // has one, or throw an illegal invocation exception, since
- // the receiver did not pass the {signature} check.
+ // Continue with the hidden prototype of the {holder} if it is a
+ // JSGlobalProxy (the hidden prototype can either be null or a
+ // JSObject in that case), or throw an illegal invocation exception,
+ // since the receiver did not pass the {signature} check.
TNode<Map> holder_map = LoadMap(holder);
var_holder = LoadMapPrototype(holder_map);
- GotoIf(IsSetWord32(LoadMapBitField2(holder_map),
- Map::HasHiddenPrototypeBit::kMask),
- &holder_loop);
+ GotoIf(IsJSGlobalProxyMap(holder_map), &holder_loop);
ThrowTypeError(context, MessageTemplate::kIllegalInvocation);
}
}
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index d98eba4eeb..d1082291ef 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -8,6 +8,7 @@
#include "src/logging/counters.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/stack-frame-info.h"
namespace v8 {
namespace internal {
@@ -76,6 +77,9 @@ BUILTIN(CallSitePrototypeGetFunction) {
StackFrameBase* frame = it.Frame();
if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value();
+
+ isolate->CountUsage(v8::Isolate::kCallSiteAPIGetFunctionSloppyCall);
+
return *frame->GetFunction();
}
@@ -135,6 +139,9 @@ BUILTIN(CallSitePrototypeGetThis) {
StackFrameBase* frame = it.Frame();
if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value();
+
+ isolate->CountUsage(v8::Isolate::kCallSiteAPIGetThisSloppyCall);
+
return *frame->GetReceiver();
}
@@ -197,9 +204,9 @@ BUILTIN(CallSitePrototypeIsToplevel) {
BUILTIN(CallSitePrototypeToString) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "toString");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- RETURN_RESULT_OR_FAILURE(isolate, it.Frame()->ToString());
+ Handle<StackTraceFrame> frame = isolate->factory()->NewStackTraceFrame(
+ GetFrameArray(isolate, recv), GetFrameIndex(isolate, recv));
+ RETURN_RESULT_OR_FAILURE(isolate, SerializeStackTraceFrame(isolate, frame));
}
#undef CHECK_CALLSITE
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index b5a9851c70..613e5f10ff 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -66,19 +66,19 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
TNode<Object> iterable);
// Constructs a collection instance. Choosing a fast path when possible.
- TNode<Object> AllocateJSCollection(TNode<Context> context,
- TNode<JSFunction> constructor,
- TNode<Object> new_target);
+ TNode<JSObject> AllocateJSCollection(TNode<Context> context,
+ TNode<JSFunction> constructor,
+ TNode<JSReceiver> new_target);
// Fast path for constructing a collection instance if the constructor
// function has not been modified.
- TNode<Object> AllocateJSCollectionFast(TNode<HeapObject> constructor);
+ TNode<JSObject> AllocateJSCollectionFast(TNode<JSFunction> constructor);
// Fallback for constructing a collection instance if the constructor function
// has been modified.
- TNode<Object> AllocateJSCollectionSlow(TNode<Context> context,
- TNode<JSFunction> constructor,
- TNode<Object> new_target);
+ TNode<JSObject> AllocateJSCollectionSlow(TNode<Context> context,
+ TNode<JSFunction> constructor,
+ TNode<JSReceiver> new_target);
// Allocates the backing store for a collection.
virtual TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
@@ -320,17 +320,17 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object)));
- TNode<Object> fast_iterator_result_map =
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ TNode<Map> fast_iterator_result_map = CAST(
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
TVARIABLE(Object, var_exception);
Goto(&loop);
BIND(&loop);
{
- TNode<Object> next = iterator_assembler.IteratorStep(
+ TNode<JSReceiver> next = iterator_assembler.IteratorStep(
context, iterator, &exit, fast_iterator_result_map);
- TNode<Object> next_value = CAST(iterator_assembler.IteratorValue(
- context, next, fast_iterator_result_map));
+ TNode<Object> next_value = iterator_assembler.IteratorValue(
+ context, next, fast_iterator_result_map);
AddConstructorEntry(variant, context, collection, add_func, next_value,
nullptr, &if_exception, &var_exception);
Goto(&loop);
@@ -367,33 +367,33 @@ void BaseCollectionsAssembler::GotoIfInitialAddFunctionModified(
GetAddFunctionNameIndex(variant), if_modified);
}
-TNode<Object> BaseCollectionsAssembler::AllocateJSCollection(
+TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollection(
TNode<Context> context, TNode<JSFunction> constructor,
- TNode<Object> new_target) {
+ TNode<JSReceiver> new_target) {
TNode<BoolT> is_target_unmodified = WordEqual(constructor, new_target);
- return Select<Object>(is_target_unmodified,
- [=] { return AllocateJSCollectionFast(constructor); },
- [=] {
- return AllocateJSCollectionSlow(context, constructor,
- new_target);
- });
+ return Select<JSObject>(
+ is_target_unmodified,
+ [=] { return AllocateJSCollectionFast(constructor); },
+ [=] {
+ return AllocateJSCollectionSlow(context, constructor, new_target);
+ });
}
-TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionFast(
- TNode<HeapObject> constructor) {
+TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollectionFast(
+ TNode<JSFunction> constructor) {
CSA_ASSERT(this, IsConstructorMap(LoadMap(constructor)));
- TNode<Object> initial_map =
- LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset);
- return CAST(AllocateJSObjectFromMap(initial_map));
+ TNode<Map> initial_map =
+ CAST(LoadJSFunctionPrototypeOrInitialMap(constructor));
+ return AllocateJSObjectFromMap(initial_map);
}
-TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionSlow(
+TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollectionSlow(
TNode<Context> context, TNode<JSFunction> constructor,
- TNode<Object> new_target) {
+ TNode<JSReceiver> new_target) {
ConstructorBuiltinsAssembler constructor_assembler(this->state());
- return CAST(constructor_assembler.EmitFastNewObject(context, constructor,
- new_target));
+ return constructor_assembler.EmitFastNewObject(context, constructor,
+ new_target);
}
void BaseCollectionsAssembler::GenerateConstructor(
@@ -408,7 +408,7 @@ void BaseCollectionsAssembler::GenerateConstructor(
TNode<Context> native_context = LoadNativeContext(context);
TNode<Object> collection = AllocateJSCollection(
- context, GetConstructor(variant, native_context), new_target);
+ context, GetConstructor(variant, native_context), CAST(new_target));
AddConstructorEntries(variant, context, native_context, collection, iterable);
Return(collection);
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index 973f1785d1..9ab3566cec 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -39,7 +39,8 @@ namespace internal {
namespace {
void ConsoleCall(
- Isolate* isolate, internal::BuiltinArguments& args,
+ Isolate* isolate,
+ internal::BuiltinArguments& args, // NOLINT(runtime/references)
void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext&)) {
CHECK(!isolate->has_pending_exception());
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index a725f3c4a1..767e626432 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -147,44 +147,40 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
}
TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* target = Parameter(Descriptor::kTarget);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<JSReceiver> new_target = CAST(Parameter(Descriptor::kNewTarget));
Label call_runtime(this);
- Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
+ TNode<JSObject> result =
+ EmitFastNewObject(context, target, new_target, &call_runtime);
Return(result);
BIND(&call_runtime);
TailCallRuntime(Runtime::kNewObject, context, target, new_target);
}
-Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
- Node* target,
- Node* new_target) {
- VARIABLE(var_obj, MachineRepresentation::kTagged);
+compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
+ SloppyTNode<Context> context, SloppyTNode<JSFunction> target,
+ SloppyTNode<JSReceiver> new_target) {
+ TVARIABLE(JSObject, var_obj);
Label call_runtime(this), end(this);
- Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
- var_obj.Bind(result);
+ var_obj = EmitFastNewObject(context, target, new_target, &call_runtime);
Goto(&end);
BIND(&call_runtime);
- var_obj.Bind(CallRuntime(Runtime::kNewObject, context, target, new_target));
+ var_obj = CAST(CallRuntime(Runtime::kNewObject, context, target, new_target));
Goto(&end);
BIND(&end);
return var_obj.value();
}
-Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
- Node* target,
- Node* new_target,
- Label* call_runtime) {
- CSA_ASSERT(this, HasInstanceType(target, JS_FUNCTION_TYPE));
- CSA_ASSERT(this, IsJSReceiver(new_target));
-
+compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
+ SloppyTNode<Context> context, SloppyTNode<JSFunction> target,
+ SloppyTNode<JSReceiver> new_target, Label* call_runtime) {
// Verify that the new target is a JSFunction.
Label fast(this), end(this);
GotoIf(HasInstanceType(new_target, JS_FUNCTION_TYPE), &fast);
@@ -732,7 +728,7 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
TNode<JSFunction> target = LoadTargetFromFrame();
Node* result =
CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
- StoreObjectField(result, JSValue::kValueOffset, n_value);
+ StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, n_value);
args.PopAndReturn(result);
}
}
@@ -798,7 +794,7 @@ TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
Node* result =
CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
- StoreObjectField(result, JSValue::kValueOffset, s_value);
+ StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, s_value);
args.PopAndReturn(result);
}
}
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index 9093a5a77b..9208506c79 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -31,10 +31,14 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
Label* call_runtime);
Node* EmitCreateEmptyObjectLiteral(Node* context);
- Node* EmitFastNewObject(Node* context, Node* target, Node* new_target);
-
- Node* EmitFastNewObject(Node* context, Node* target, Node* new_target,
- Label* call_runtime);
+ TNode<JSObject> EmitFastNewObject(SloppyTNode<Context> context,
+ SloppyTNode<JSFunction> target,
+ SloppyTNode<JSReceiver> new_target);
+
+ TNode<JSObject> EmitFastNewObject(SloppyTNode<Context> context,
+ SloppyTNode<JSFunction> target,
+ SloppyTNode<JSReceiver> new_target,
+ Label* call_runtime);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index bc7e349ce1..71a9cbf145 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -392,7 +392,8 @@ TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) {
// ES6 section 7.1.13 ToObject (argument)
TF_BUILTIN(ToObject, CodeStubAssembler) {
Label if_smi(this, Label::kDeferred), if_jsreceiver(this),
- if_noconstructor(this, Label::kDeferred), if_wrapjsvalue(this);
+ if_noconstructor(this, Label::kDeferred),
+ if_wrapjs_primitive_wrapper(this);
Node* context = Parameter(Descriptor::kContext);
Node* object = Parameter(Descriptor::kArgument);
@@ -411,27 +412,30 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
IntPtrConstant(Map::kNoConstructorFunctionIndex)),
&if_noconstructor);
constructor_function_index_var.Bind(constructor_function_index);
- Goto(&if_wrapjsvalue);
+ Goto(&if_wrapjs_primitive_wrapper);
BIND(&if_smi);
constructor_function_index_var.Bind(
IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
- Goto(&if_wrapjsvalue);
+ Goto(&if_wrapjs_primitive_wrapper);
- BIND(&if_wrapjsvalue);
+ BIND(&if_wrapjs_primitive_wrapper);
TNode<Context> native_context = LoadNativeContext(context);
Node* constructor = LoadContextElement(
native_context, constructor_function_index_var.value());
Node* initial_map =
LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset);
- Node* js_value = Allocate(JSValue::kSize);
- StoreMapNoWriteBarrier(js_value, initial_map);
- StoreObjectFieldRoot(js_value, JSValue::kPropertiesOrHashOffset,
+ Node* js_primitive_wrapper = Allocate(JSPrimitiveWrapper::kSize);
+ StoreMapNoWriteBarrier(js_primitive_wrapper, initial_map);
+ StoreObjectFieldRoot(js_primitive_wrapper,
+ JSPrimitiveWrapper::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
- StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
+ StoreObjectFieldRoot(js_primitive_wrapper,
+ JSPrimitiveWrapper::kElementsOffset,
RootIndex::kEmptyFixedArray);
- StoreObjectField(js_value, JSValue::kValueOffset, object);
- Return(js_value);
+ StoreObjectField(js_primitive_wrapper, JSPrimitiveWrapper::kValueOffset,
+ object);
+ Return(js_primitive_wrapper);
BIND(&if_noconstructor);
ThrowTypeError(context, MessageTemplate::kUndefinedOrNullToObject,
diff --git a/deps/v8/src/builtins/builtins-data-view-gen.h b/deps/v8/src/builtins/builtins-data-view-gen.h
index eeb84f34db..d5c6571880 100644
--- a/deps/v8/src/builtins/builtins-data-view-gen.h
+++ b/deps/v8/src/builtins/builtins-data-view-gen.h
@@ -17,13 +17,13 @@ class DataViewBuiltinsAssembler : public CodeStubAssembler {
explicit DataViewBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- TNode<Int32T> LoadUint8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
- return UncheckedCast<Int32T>(
+ TNode<Uint8T> LoadUint8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
+ return UncheckedCast<Uint8T>(
Load(MachineType::Uint8(), data_pointer, offset));
}
- TNode<Int32T> LoadInt8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
- return UncheckedCast<Int32T>(
+ TNode<Int8T> LoadInt8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
+ return UncheckedCast<Int8T>(
Load(MachineType::Int8(), data_pointer, offset));
}
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 3412edb89d..23ab4a88ca 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -103,8 +103,8 @@ namespace internal {
\
/* String helpers */ \
TFC(StringCharAt, StringAt) \
- TFC(StringCodePointAtUTF16, StringAt) \
- TFC(StringCodePointAtUTF32, StringAt) \
+ TFC(StringCodePointAt, StringAt) \
+ TFC(StringFromCodePointAt, StringAtAsString) \
TFC(StringEqual, Compare) \
TFC(StringGreaterThan, Compare) \
TFC(StringGreaterThanOrEqual, Compare) \
@@ -170,7 +170,9 @@ namespace internal {
\
/* Adapters for Turbofan into runtime */ \
TFC(AllocateInYoungGeneration, Allocate) \
+ TFC(AllocateRegularInYoungGeneration, Allocate) \
TFC(AllocateInOldGeneration, Allocate) \
+ TFC(AllocateRegularInOldGeneration, Allocate) \
\
/* TurboFan support builtins */ \
TFS(CopyFastSmiOrObjectElements, kObject) \
@@ -266,7 +268,7 @@ namespace internal {
\
/* Abort */ \
TFC(Abort, Abort) \
- TFC(AbortJS, Abort) \
+ TFC(AbortCSAAssert, Abort) \
\
/* Built-in functions for Javascript */ \
/* Special internal builtins */ \
@@ -726,16 +728,12 @@ namespace internal {
CPP(ObjectGetOwnPropertyDescriptors) \
TFJ(ObjectGetOwnPropertyNames, 1, kReceiver, kObject) \
CPP(ObjectGetOwnPropertySymbols) \
- CPP(ObjectGetPrototypeOf) \
- CPP(ObjectSetPrototypeOf) \
TFJ(ObjectIs, 2, kReceiver, kLeft, kRight) \
- CPP(ObjectIsExtensible) \
CPP(ObjectIsFrozen) \
CPP(ObjectIsSealed) \
TFJ(ObjectKeys, 1, kReceiver, kObject) \
CPP(ObjectLookupGetter) \
CPP(ObjectLookupSetter) \
- CPP(ObjectPreventExtensions) \
/* ES6 #sec-object.prototype.tostring */ \
TFJ(ObjectPrototypeToString, 0, kReceiver) \
/* ES6 #sec-object.prototype.valueof */ \
@@ -823,16 +821,10 @@ namespace internal {
ASM(ReflectApply, Dummy) \
ASM(ReflectConstruct, Dummy) \
CPP(ReflectDefineProperty) \
- CPP(ReflectDeleteProperty) \
- CPP(ReflectGet) \
CPP(ReflectGetOwnPropertyDescriptor) \
- CPP(ReflectGetPrototypeOf) \
TFJ(ReflectHas, 2, kReceiver, kTarget, kKey) \
- CPP(ReflectIsExtensible) \
CPP(ReflectOwnKeys) \
- CPP(ReflectPreventExtensions) \
CPP(ReflectSet) \
- CPP(ReflectSetPrototypeOf) \
\
/* RegExp */ \
CPP(RegExpCapture1Getter) \
@@ -1150,6 +1142,7 @@ namespace internal {
ASM(StackCheck, Dummy) \
ASM(DoubleToI, Dummy) \
TFC(GetProperty, GetProperty) \
+ TFS(GetPropertyWithReceiver, kObject, kKey, kReceiver, kOnNonExistent) \
TFS(SetProperty, kReceiver, kKey, kValue) \
TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \
ASM(MemCopyUint8Uint8, CCall) \
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index e099baeb34..3bcc7356d4 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -31,10 +31,11 @@ BUILTIN(ErrorConstructor) {
}
RETURN_RESULT_OR_FAILURE(
- isolate, ErrorUtils::Construct(isolate, args.target(),
- Handle<Object>::cast(args.new_target()),
- args.atOrUndefined(isolate, 1), mode,
- caller, false));
+ isolate,
+ ErrorUtils::Construct(isolate, args.target(),
+ Handle<Object>::cast(args.new_target()),
+ args.atOrUndefined(isolate, 1), mode, caller,
+ ErrorUtils::StackTraceCollection::kDetailed));
}
// static
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
index 53e974c452..137f7f3402 100644
--- a/deps/v8/src/builtins/builtins-global.cc
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -86,17 +86,27 @@ BUILTIN(GlobalEval) {
Handle<Object> x = args.atOrUndefined(isolate, 1);
Handle<JSFunction> target = args.target();
Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
- if (!x->IsString()) return *x;
if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
isolate->CountUsage(v8::Isolate::kFunctionConstructorReturnedUndefined);
return ReadOnlyRoots(isolate).undefined_value();
}
+
+ // Run embedder pre-checks before executing eval. If the argument is a
+ // non-String (or other object the embedder doesn't know to handle), then
+ // return it directly.
+ MaybeHandle<String> source;
+ bool unhandled_object;
+ std::tie(source, unhandled_object) =
+ Compiler::ValidateDynamicCompilationSource(
+ isolate, handle(target->native_context(), isolate), x);
+ if (unhandled_object) return *x;
+
Handle<JSFunction> function;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, function,
- Compiler::GetFunctionFromString(handle(target->native_context(), isolate),
- Handle<String>::cast(x),
- NO_PARSE_RESTRICTION, kNoSourcePosition));
+ Compiler::GetFunctionFromValidatedString(
+ handle(target->native_context(), isolate), source,
+ NO_PARSE_RESTRICTION, kNoSourcePosition));
RETURN_RESULT_OR_FAILURE(
isolate,
Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index d1b50f2cdc..973356f569 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -28,7 +28,8 @@ class HandlerBuiltinsAssembler : public CodeStubAssembler {
// kind. Use with caution. This produces a *lot* of code.
using ElementsKindSwitchCase = std::function<void(ElementsKind)>;
void DispatchByElementsKind(TNode<Int32T> elements_kind,
- const ElementsKindSwitchCase& case_function);
+ const ElementsKindSwitchCase& case_function,
+ bool handle_typed_elements_kind);
// Dispatches over all possible combinations of {from,to} elements kinds.
using ElementsKindTransitionSwitchCase =
@@ -48,7 +49,7 @@ TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) {
TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) {
Node* value = Parameter(Descriptor::kReceiver);
- Node* string = LoadJSValueValue(value);
+ Node* string = LoadJSPrimitiveWrapperValue(value);
Return(LoadStringLengthAsSmi(string));
}
@@ -227,7 +228,7 @@ void HandlerBuiltinsAssembler::Generate_ElementsTransitionAndStore(
[=, &miss](ElementsKind from_kind, ElementsKind to_kind) {
TransitionElementsKind(receiver, map, from_kind, to_kind, &miss);
EmitElementStore(receiver, key, value, to_kind, store_mode, &miss,
- context);
+ context, nullptr);
});
Return(value);
}
@@ -280,7 +281,8 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW,
V(BIGINT64_ELEMENTS)
void HandlerBuiltinsAssembler::DispatchByElementsKind(
- TNode<Int32T> elements_kind, const ElementsKindSwitchCase& case_function) {
+ TNode<Int32T> elements_kind, const ElementsKindSwitchCase& case_function,
+ bool handle_typed_elements_kind) {
Label next(this), if_unknown_type(this, Label::kDeferred);
int32_t elements_kinds[] = {
@@ -300,6 +302,8 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind(
};
STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
+ // TODO(mythria): Do not emit cases for typed elements kind when
+ // handle_typed_elements_kind is false to decrease the jump table size.
Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
arraysize(elements_kinds));
@@ -310,6 +314,9 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind(
IsFrozenOrSealedElementsKindUnchecked(KIND)) { \
/* Disable support for frozen or sealed elements kinds. */ \
Unreachable(); \
+ } else if (!handle_typed_elements_kind && \
+ IsTypedArrayElementsKind(KIND)) { \
+ Unreachable(); \
} else { \
case_function(KIND); \
Goto(&next); \
@@ -340,17 +347,26 @@ void HandlerBuiltinsAssembler::Generate_StoreFastElementIC(
Label miss(this);
+ bool handle_typed_elements_kind =
+ store_mode == STANDARD_STORE || store_mode == STORE_IGNORE_OUT_OF_BOUNDS;
+ // For typed arrays maybe_converted_value contains the value obtained after
+ // calling ToNumber. We should pass the converted value to the runtime to
+ // avoid doing the user visible conversion again.
+ VARIABLE(maybe_converted_value, MachineRepresentation::kTagged, value);
+ maybe_converted_value.Bind(value);
// TODO(v8:8481): Pass elements_kind in feedback vector slots.
- DispatchByElementsKind(LoadElementsKind(receiver),
- [=, &miss](ElementsKind elements_kind) {
- EmitElementStore(receiver, key, value, elements_kind,
- store_mode, &miss, context);
- });
+ DispatchByElementsKind(
+ LoadElementsKind(receiver),
+ [=, &miss, &maybe_converted_value](ElementsKind elements_kind) {
+ EmitElementStore(receiver, key, value, elements_kind, store_mode, &miss,
+ context, &maybe_converted_value);
+ },
+ handle_typed_elements_kind);
Return(value);
BIND(&miss);
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector,
- receiver, key);
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context,
+ maybe_converted_value.value(), slot, vector, receiver, key);
}
TF_BUILTIN(StoreFastElementIC_Standard, HandlerBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index baaadb722a..8d22767b58 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -614,8 +614,9 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
Label if_done(this), if_noelements(this),
if_sourcenotjsobject(this, Label::kDeferred);
- // JSValue wrappers for numbers don't have any enumerable own properties,
- // so we can immediately skip the whole operation if {source} is a Smi.
+ // JSPrimitiveWrapper wrappers for numbers don't have any enumerable own
+ // properties, so we can immediately skip the whole operation if {source} is
+ // a Smi.
GotoIf(TaggedIsSmi(source), &if_done);
// Otherwise check if {source} is a proper JSObject, and if not, defer
@@ -809,17 +810,49 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) {
TNode<IntPtrT> requested_size =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+ TNode<Smi> allocation_flags =
+ SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+ AllowLargeObjectAllocationFlag::encode(true)));
TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
- SmiFromIntPtr(requested_size));
+ SmiFromIntPtr(requested_size), allocation_flags);
+}
+
+TF_BUILTIN(AllocateRegularInYoungGeneration, CodeStubAssembler) {
+ TNode<IntPtrT> requested_size =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+
+ TNode<Smi> allocation_flags =
+ SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+ AllowLargeObjectAllocationFlag::encode(false)));
+ TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
+ SmiFromIntPtr(requested_size), allocation_flags);
}
TF_BUILTIN(AllocateInOldGeneration, CodeStubAssembler) {
TNode<IntPtrT> requested_size =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+
+ TNode<Smi> runtime_flags =
+ SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+ AllowLargeObjectAllocationFlag::encode(true)));
+ TailCallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
+ SmiFromIntPtr(requested_size), runtime_flags);
+}
+
+TF_BUILTIN(AllocateRegularInOldGeneration, CodeStubAssembler) {
+ TNode<IntPtrT> requested_size =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+ TNode<Smi> runtime_flags =
+ SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+ AllowLargeObjectAllocationFlag::encode(false)));
TailCallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
- SmiFromIntPtr(requested_size), SmiConstant(0));
+ SmiFromIntPtr(requested_size), runtime_flags);
}
TF_BUILTIN(Abort, CodeStubAssembler) {
@@ -827,9 +860,9 @@ TF_BUILTIN(Abort, CodeStubAssembler) {
TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id);
}
-TF_BUILTIN(AbortJS, CodeStubAssembler) {
+TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) {
TNode<String> message = CAST(Parameter(Descriptor::kMessageOrMessageId));
- TailCallRuntime(Runtime::kAbortJS, NoContextConstant(), message);
+ TailCallRuntime(Runtime::kAbortCSAAssert, NoContextConstant(), message);
}
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
@@ -907,6 +940,8 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
Node* object = Parameter(Descriptor::kObject);
Node* key = Parameter(Descriptor::kKey);
Node* context = Parameter(Descriptor::kContext);
+ // TODO(duongn): consider tailcalling to GetPropertyWithReceiver(object,
+ // object, key, OnNonExistent::kReturnUndefined).
Label if_notfound(this), if_proxy(this, Label::kDeferred),
if_slow(this, Label::kDeferred);
@@ -932,7 +967,7 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
Goto(if_bailout);
};
- TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ TryPrototypeChainLookup(object, object, key, lookup_property_in_holder,
lookup_element_in_holder, &if_notfound, &if_slow,
&if_proxy);
@@ -955,6 +990,74 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
}
}
+// ES6 [[Get]] operation with Receiver.
+TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
+ Node* object = Parameter(Descriptor::kObject);
+ Node* key = Parameter(Descriptor::kKey);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* on_non_existent = Parameter(Descriptor::kOnNonExistent);
+ Label if_notfound(this), if_proxy(this, Label::kDeferred),
+ if_slow(this, Label::kDeferred);
+
+ CodeStubAssembler::LookupInHolder lookup_property_in_holder =
+ [=](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* unique_name, Label* next_holder,
+ Label* if_bailout) {
+ VARIABLE(var_value, MachineRepresentation::kTagged);
+ Label if_found(this);
+ TryGetOwnProperty(context, receiver, holder, holder_map,
+ holder_instance_type, unique_name, &if_found,
+ &var_value, next_holder, if_bailout);
+ BIND(&if_found);
+ Return(var_value.value());
+ };
+
+ CodeStubAssembler::LookupInHolder lookup_element_in_holder =
+ [=](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* index, Label* next_holder,
+ Label* if_bailout) {
+ // Not supported yet.
+ Use(next_holder);
+ Goto(if_bailout);
+ };
+
+ TryPrototypeChainLookup(receiver, object, key, lookup_property_in_holder,
+ lookup_element_in_holder, &if_notfound, &if_slow,
+ &if_proxy);
+
+ BIND(&if_notfound);
+ Label throw_reference_error(this);
+ GotoIf(WordEqual(on_non_existent,
+ SmiConstant(OnNonExistent::kThrowReferenceError)),
+ &throw_reference_error);
+ CSA_ASSERT(this, WordEqual(on_non_existent,
+ SmiConstant(OnNonExistent::kReturnUndefined)));
+ Return(UndefinedConstant());
+
+ BIND(&throw_reference_error);
+ Return(CallRuntime(Runtime::kThrowReferenceError, context, key));
+
+ BIND(&if_slow);
+ TailCallRuntime(Runtime::kGetPropertyWithReceiver, context, object, key,
+ receiver, on_non_existent);
+
+ BIND(&if_proxy);
+ {
+ // Convert the {key} to a Name first.
+ Node* name = CallBuiltin(Builtins::kToName, context, key);
+
+ // Proxies cannot handle private symbols, so bail out.
+ GotoIf(IsPrivateSymbol(name), &if_slow);
+
+ // The {object} is a JSProxy instance, look up the {name} on it, passing
+ // {object} both as receiver and holder. If {name} is absent we can safely
+ // return undefined from here.
+ TailCallBuiltin(Builtins::kProxyGetProperty, context, object, name,
+ receiver, on_non_existent);
+ }
+}
+
// ES6 [[Set]] operation.
TF_BUILTIN(SetProperty, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index 882afa3c32..ff8e96f4f5 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -276,15 +276,14 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
// 2. Let format be ? OrdinaryCreateFromConstructor(newTarget,
// "%<T>Prototype%", ...).
- Handle<JSObject> obj;
+ Handle<Map> map;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, obj,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<T> format = Handle<T>::cast(obj);
+ isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
// 3. Perform ? Initialize<T>(Format, locales, options).
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, format, T::Initialize(isolate, format, locales, options));
+ Handle<T> format;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, format,
+ T::New(isolate, map, locales, options));
// 4. Let this be the this value.
Handle<Object> receiver = args.receiver();
@@ -351,21 +350,17 @@ Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate,
Handle<JSFunction> target = args.target();
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<JSObject> obj;
+ Handle<Map> map;
// 2. Let result be OrdinaryCreateFromConstructor(NewTarget,
// "%<T>Prototype%").
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, obj,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<T> result = Handle<T>::cast(obj);
- result->set_flags(0);
+ isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
Handle<Object> locales = args.atOrUndefined(isolate, 1);
Handle<Object> options = args.atOrUndefined(isolate, 2);
- // 3. Return Initialize<T>(t, locales, options).
- RETURN_RESULT_OR_FAILURE(isolate,
- T::Initialize(isolate, result, locales, options));
+ // 3. Return New<T>(t, locales, options).
+ RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options));
}
/**
@@ -387,14 +382,11 @@ Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) {
Handle<Object> locales = args.atOrUndefined(isolate, 1);
Handle<Object> options = args.atOrUndefined(isolate, 2);
- Handle<JSObject> obj;
+ Handle<Map> map;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, obj,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<T> result = Handle<T>::cast(obj);
+ isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
- RETURN_RESULT_OR_FAILURE(isolate,
- T::Initialize(isolate, result, locales, options));
+ RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options));
}
} // namespace
@@ -591,12 +583,11 @@ MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
Handle<JSFunction> constructor,
Handle<JSReceiver> new_target,
Handle<Object> tag, Handle<Object> options) {
- Handle<JSObject> locale;
+ Handle<Map> map;
// 6. Let locale be ? OrdinaryCreateFromConstructor(NewTarget,
// %LocalePrototype%, internalSlotsList).
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, locale,
- JSObject::New(constructor, new_target, Handle<AllocationSite>::null()),
+ isolate, map, JSFunction::GetDerivedMap(isolate, constructor, new_target),
JSLocale);
// 7. If Type(tag) is not String or Object, throw a TypeError exception.
@@ -628,8 +619,7 @@ MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
Object::ToObject(isolate, options), JSLocale);
}
- return JSLocale::Initialize(isolate, Handle<JSLocale>::cast(locale),
- locale_string, options_object);
+ return JSLocale::New(isolate, map, locale_string, options_object);
}
} // namespace
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 0484501bfb..b3d8e27dbc 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -75,7 +75,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
}
}
-TNode<Object> IteratorBuiltinsAssembler::IteratorStep(
+TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
Node* context, const IteratorRecord& iterator, Label* if_done,
Node* fast_iterator_result_map, Label* if_exception, Variable* exception) {
DCHECK_NOT_NULL(if_done);
@@ -125,23 +125,21 @@ TNode<Object> IteratorBuiltinsAssembler::IteratorStep(
}
BIND(&return_result);
- return UncheckedCast<Object>(result);
+ return CAST(result);
}
-Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result,
- Node* fast_iterator_result_map,
- Label* if_exception,
- Variable* exception) {
- CSA_ASSERT(this, IsJSReceiver(result));
-
+TNode<Object> IteratorBuiltinsAssembler::IteratorValue(
+ TNode<Context> context, TNode<JSReceiver> result,
+ base::Optional<TNode<Map>> fast_iterator_result_map, Label* if_exception,
+ Variable* exception) {
Label exit(this);
- VARIABLE(var_value, MachineRepresentation::kTagged);
- if (fast_iterator_result_map != nullptr) {
+ TVARIABLE(Object, var_value);
+ if (fast_iterator_result_map) {
// Fast iterator result case:
Label if_generic(this);
Node* map = LoadMap(result);
- GotoIfNot(WordEqual(map, fast_iterator_result_map), &if_generic);
- var_value.Bind(LoadObjectField(result, JSIteratorResult::kValueOffset));
+ GotoIfNot(WordEqual(map, *fast_iterator_result_map), &if_generic);
+ var_value = LoadObjectField(result, JSIteratorResult::kValueOffset);
Goto(&exit);
BIND(&if_generic);
@@ -149,9 +147,10 @@ Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result,
// Generic iterator result case:
{
- Node* value = GetProperty(context, result, factory()->value_string());
+ TNode<Object> value =
+ GetProperty(context, result, factory()->value_string());
GotoIfException(value, if_exception, exception);
- var_value.Bind(value);
+ var_value = value;
Goto(&exit);
}
@@ -217,10 +216,10 @@ TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
BIND(&loop_start);
{
// a. Set next to ? IteratorStep(iteratorRecord).
- TNode<Object> next = IteratorStep(context, iterator_record, &done);
+ TNode<JSReceiver> next = IteratorStep(context, iterator_record, &done);
// b. If next is not false, then
// i. Let nextValue be ? IteratorValue(next).
- TNode<Object> next_value = CAST(IteratorValue(context, next));
+ TNode<Object> next_value = IteratorValue(context, next);
// ii. Append nextValue to the end of the List values.
values.Push(next_value);
Goto(&loop_start);
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index cf421dc5b7..db86c65385 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -32,18 +32,19 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorstep
- // Returns `false` if the iterator is done, otherwise returns an
- // iterator result.
+ // If the iterator is done, jumps to {if_done}; otherwise returns an
+ // iterator result.
// `fast_iterator_result_map` refers to the map for the JSIteratorResult
// object, loaded from the native context.
- TNode<Object> IteratorStep(Node* context, const IteratorRecord& iterator,
- Label* if_done,
- Node* fast_iterator_result_map = nullptr,
- Label* if_exception = nullptr,
- Variable* exception = nullptr);
-
- TNode<Object> IteratorStep(Node* context, const IteratorRecord& iterator,
- Node* fast_iterator_result_map, Label* if_done) {
+ TNode<JSReceiver> IteratorStep(Node* context, const IteratorRecord& iterator,
+ Label* if_done,
+ Node* fast_iterator_result_map = nullptr,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
+
+ TNode<JSReceiver> IteratorStep(Node* context, const IteratorRecord& iterator,
+ Node* fast_iterator_result_map,
+ Label* if_done) {
return IteratorStep(context, iterator, if_done, fast_iterator_result_map);
}
@@ -51,10 +52,10 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// Return the `value` field from an iterator.
// `fast_iterator_result_map` refers to the map for the JSIteratorResult
// object, loaded from the native context.
- Node* IteratorValue(Node* context, Node* result,
- Node* fast_iterator_result_map = nullptr,
- Label* if_exception = nullptr,
- Variable* exception = nullptr);
+ TNode<Object> IteratorValue(
+ TNode<Context> context, TNode<JSReceiver> result,
+ base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt,
+ Label* if_exception = nullptr, Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorclose
void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
index 6d3274a4a5..cce780ab9f 100644
--- a/deps/v8/src/builtins/builtins-math.cc
+++ b/deps/v8/src/builtins/builtins-math.cc
@@ -20,7 +20,6 @@ BUILTIN(MathHypot) {
if (length == 0) return Smi::kZero;
DCHECK_LT(0, length);
double max = 0;
- bool one_arg_is_nan = false;
std::vector<double> abs_values;
abs_values.reserve(length);
for (int i = 0; i < length; i++) {
@@ -28,29 +27,20 @@ BUILTIN(MathHypot) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
Object::ToNumber(isolate, x));
double abs_value = std::abs(x->Number());
-
- if (std::isnan(abs_value)) {
- one_arg_is_nan = true;
- } else {
- abs_values.push_back(abs_value);
- if (max < abs_value) {
- max = abs_value;
- }
+ abs_values.push_back(abs_value);
+ // Use negation here to make sure that {max} is NaN
+ // in the end in case any of the arguments was NaN.
+ if (!(abs_value <= max)) {
+ max = abs_value;
}
}
- if (max == V8_INFINITY) {
- return *isolate->factory()->NewNumber(V8_INFINITY);
- }
-
- if (one_arg_is_nan) {
- return ReadOnlyRoots(isolate).nan_value();
- }
-
if (max == 0) {
return Smi::kZero;
+ } else if (max == V8_INFINITY) {
+ return ReadOnlyRoots(isolate).infinity_value();
}
- DCHECK_GT(max, 0);
+ DCHECK(!(max <= 0));
// Kahan summation to avoid rounding errors.
// Normalize the numbers to the largest one to avoid overflow.
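The MathHypot rewrite above folds the NaN bookkeeping into the max computation and keeps the existing Kahan summation. A minimal standalone sketch of those two techniques (ordinary C++, not the builtin itself, and without the spec's full ordering of the infinity/NaN cases):

#include <cmath>
#include <limits>
#include <vector>

double HypotSketch(const std::vector<double>& values) {
  double max = 0;
  for (double v : values) {
    double a = std::fabs(v);
    // Negated comparison: !(a <= max) is also true when a is NaN, so the
    // running max becomes (and stays) NaN if any input is NaN.
    if (!(a <= max)) max = a;
  }
  if (max == 0) return 0;
  if (std::isnan(max)) return max;
  if (max == std::numeric_limits<double>::infinity()) return max;

  // Kahan (compensated) summation of the squares, normalized by the largest
  // magnitude to avoid intermediate overflow.
  double sum = 0, compensation = 0;
  for (double v : values) {
    double term = (v / max) * (v / max);
    double y = term - compensation;
    double t = sum + y;
    compensation = (t - sum) - y;
    sum = t;
  }
  return std::sqrt(sum) * max;
}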
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 5b3af79f00..f5c4477c23 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -315,8 +315,8 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
// ES6 #sec-number.prototype.valueof
TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* result = ToThisValue(context, receiver, PrimitiveType::kNumber,
"Number.prototype.valueOf");
@@ -538,8 +538,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
BIND(&do_bigint_add);
{
- Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Operation::kAdd)));
+ TailCallBuiltin(Builtins::kBigIntAdd, context, var_left.value(),
+ var_right.value());
}
BIND(&do_double_add);
@@ -996,8 +996,8 @@ TF_BUILTIN(Equal, CodeStubAssembler) {
}
TF_BUILTIN(StrictEqual, CodeStubAssembler) {
- Node* lhs = Parameter(Descriptor::kLeft);
- Node* rhs = Parameter(Descriptor::kRight);
+ TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
+ TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
Return(StrictEqual(lhs, rhs));
}
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 929e686604..d2fb0ff74c 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -25,8 +25,8 @@ BUILTIN(NumberPrototypeToExponential) {
Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
- if (value->IsJSValue()) {
- value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ if (value->IsJSPrimitiveWrapper()) {
+ value = handle(Handle<JSPrimitiveWrapper>::cast(value)->value(), isolate);
}
if (!value->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -70,8 +70,8 @@ BUILTIN(NumberPrototypeToFixed) {
Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
- if (value->IsJSValue()) {
- value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ if (value->IsJSPrimitiveWrapper()) {
+ value = handle(Handle<JSPrimitiveWrapper>::cast(value)->value(), isolate);
}
if (!value->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -117,8 +117,8 @@ BUILTIN(NumberPrototypeToLocaleString) {
Handle<Object> value = args.at(0);
// Unwrap the receiver {value}.
- if (value->IsJSValue()) {
- value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ if (value->IsJSPrimitiveWrapper()) {
+ value = handle(Handle<JSPrimitiveWrapper>::cast(value)->value(), isolate);
}
// 1. Let x be ? thisNumberValue(this value)
if (!value->IsNumber()) {
@@ -147,8 +147,8 @@ BUILTIN(NumberPrototypeToPrecision) {
Handle<Object> precision = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
- if (value->IsJSValue()) {
- value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ if (value->IsJSPrimitiveWrapper()) {
+ value = handle(Handle<JSPrimitiveWrapper>::cast(value)->value(), isolate);
}
if (!value->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -192,8 +192,8 @@ BUILTIN(NumberPrototypeToString) {
Handle<Object> radix = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
- if (value->IsJSValue()) {
- value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ if (value->IsJSPrimitiveWrapper()) {
+ value = handle(Handle<JSPrimitiveWrapper>::cast(value)->value(), isolate);
}
if (!value->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 314331d498..8d59ee3bd1 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -65,8 +65,6 @@ class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
TNode<BoolT> IsPropertyKindData(TNode<Uint32T> kind);
- TNode<Uint32T> HasHiddenPrototype(TNode<Map> map);
-
TNode<Uint32T> LoadPropertyKind(TNode<Uint32T> details) {
return DecodeWord32<PropertyDetails::KindField>(details);
}
@@ -185,12 +183,6 @@ TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
return Word32Equal(kind, Int32Constant(PropertyKind::kData));
}
-TNode<Uint32T> ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype(
- TNode<Map> map) {
- TNode<Uint32T> bit_field2 = Unsigned(LoadMapBitField2(map));
- return DecodeWord32<Map::HasHiddenPrototypeBit>(bit_field2);
-}
-
void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
TNode<Context> context, TNode<Object> maybe_object,
CollectType collect_type) {
@@ -254,7 +246,6 @@ void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties(
TNode<Map> map, Label* if_slow) {
GotoIf(IsStringWrapperElementsKind(map), if_slow);
GotoIf(IsSpecialReceiverMap(map), if_slow);
- GotoIf(HasHiddenPrototype(map), if_slow);
GotoIf(IsDictionaryMap(map), if_slow);
}
@@ -602,9 +593,19 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
if_fast(this), try_fast(this, Label::kDeferred),
if_slow(this, Label::kDeferred), if_join(this);
- // Check if the {object} has a usable enum cache.
+ // Take the slow path if {object} has a custom elements receiver instance
+ // type or has any elements.
GotoIf(TaggedIsSmi(object), &if_slow);
Node* object_map = LoadMap(object);
+ TNode<Int32T> instance_type = LoadMapInstanceType(object_map);
+ GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &if_slow);
+ Node* object_elements = LoadElements(object);
+ GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
+ Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
+ &if_slow);
+
+ // Check if the {object} has a usable enum cache.
+ BIND(&if_empty_elements);
Node* object_bit_field3 = LoadMapBitField3(object_map);
Node* object_enum_length =
DecodeWordFromWord32<Map::EnumLengthBits>(object_bit_field3);
@@ -612,15 +613,7 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
WordEqual(object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel)),
&try_fast);
- // Ensure that the {object} doesn't have any elements.
- CSA_ASSERT(this, IsJSObjectMap(object_map));
- Node* object_elements = LoadElements(object);
- GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
- Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
- &if_slow);
-
// Check whether all own properties are enumerable.
- BIND(&if_empty_elements);
Node* number_descriptors =
DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(object_bit_field3);
GotoIfNot(WordEqual(object_enum_length, number_descriptors), &if_slow);
@@ -728,11 +721,11 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
// invoke the ToObject builtin, which raises the appropriate error.
// Otherwise we don't need to invoke ToObject, since {receiver} is
// either already a JSReceiver, in which case ToObject is a no-op,
- // or it's a Primitive and ToObject would allocate a fresh JSValue
+ // or it's a Primitive and ToObject would allocate a fresh JSPrimitiveWrapper
// wrapper, which wouldn't be identical to any existing JSReceiver
// found in the prototype chain of {value}, hence it will return
// false no matter if we search for the Primitive {receiver} or
- // a newly allocated JSValue wrapper for {receiver}.
+ // a newly allocated JSPrimitiveWrapper wrapper for {receiver}.
GotoIf(IsNull(receiver), &if_receiverisnullorundefined);
GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined);
@@ -794,7 +787,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
{JS_SPECIAL_API_OBJECT_TYPE, &if_apiobject},
{JS_PROXY_TYPE, &if_proxy},
{JS_ERROR_TYPE, &if_error},
- {JS_VALUE_TYPE, &if_value}};
+ {JS_PRIMITIVE_WRAPPER_TYPE, &if_value}};
size_t const kNumCases = arraysize(kJumpTable);
Label* case_labels[kNumCases];
int32_t case_values[kNumCases];
@@ -996,7 +989,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
if_value_is_bigint(this, Label::kDeferred),
if_value_is_string(this, Label::kDeferred);
- Node* receiver_value = LoadJSValueValue(receiver);
+ Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver);
// We need to start with the object to see if the value was a subclass
// which might have interesting properties.
var_holder.Bind(receiver);
@@ -1346,10 +1339,15 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(
result, JSGeneratorObject::kParametersAndRegistersOffset,
parameters_and_registers);
+ Node* resume_mode = SmiConstant(JSGeneratorObject::ResumeMode::kNext);
+ StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kResumeModeOffset,
+ resume_mode);
Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset,
executing);
- GotoIfNot(HasInstanceType(maybe_map, JS_ASYNC_GENERATOR_OBJECT_TYPE), &done);
+ GotoIfNot(InstanceTypeEqual(LoadMapInstanceType(maybe_map),
+ JS_ASYNC_GENERATOR_OBJECT_TYPE),
+ &done);
StoreObjectFieldNoWriteBarrier(
result, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
Goto(&done);
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 59e4373f98..1ca5fffd8d 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -5,7 +5,7 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-factory.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/logging/counters.h"
#include "src/objects/keys.h"
@@ -218,52 +218,6 @@ BUILTIN(ObjectFreeze) {
return *object;
}
-// ES section 19.1.2.9 Object.getPrototypeOf ( O )
-BUILTIN(ObjectGetPrototypeOf) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
-
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
-
- RETURN_RESULT_OR_FAILURE(isolate,
- JSReceiver::GetPrototype(isolate, receiver));
-}
-
-// ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto )
-BUILTIN(ObjectSetPrototypeOf) {
- HandleScope scope(isolate);
-
- // 1. Let O be ? RequireObjectCoercible(O).
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- if (object->IsNullOrUndefined(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
- isolate->factory()->NewStringFromAsciiChecked(
- "Object.setPrototypeOf")));
- }
-
- // 2. If Type(proto) is neither Object nor Null, throw a TypeError exception.
- Handle<Object> proto = args.atOrUndefined(isolate, 2);
- if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
- }
-
- // 3. If Type(O) is not Object, return O.
- if (!object->IsJSReceiver()) return *object;
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-
- // 4. Let status be ? O.[[SetPrototypeOf]](proto).
- // 5. If status is false, throw a TypeError exception.
- MAYBE_RETURN(JSReceiver::SetPrototype(receiver, proto, true, kThrowOnError),
- ReadOnlyRoots(isolate).exception());
-
- // 6. Return O.
- return *receiver;
-}
-
// ES6 section B.2.2.1.1 get Object.prototype.__proto__
BUILTIN(ObjectPrototypeGetProto) {
HandleScope scope(isolate);
@@ -332,18 +286,6 @@ BUILTIN(ObjectGetOwnPropertySymbols) {
return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS);
}
-// ES6 section 19.1.2.11 Object.isExtensible ( O )
-BUILTIN(ObjectIsExtensible) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Maybe<bool> result =
- object->IsJSReceiver()
- ? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
- : Just(false);
- MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
- return isolate->heap()->ToBoolean(result.FromJust());
-}
-
// ES6 section 19.1.2.12 Object.isFrozen ( O )
BUILTIN(ObjectIsFrozen) {
HandleScope scope(isolate);
@@ -403,18 +345,6 @@ BUILTIN(ObjectGetOwnPropertyDescriptors) {
return *descriptors;
}
-// ES6 section 19.1.2.15 Object.preventExtensions ( O )
-BUILTIN(ObjectPreventExtensions) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- if (object->IsJSReceiver()) {
- MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
- kThrowOnError),
- ReadOnlyRoots(isolate).exception());
- }
- return *object;
-}
-
// ES6 section 19.1.2.17 Object.seal ( O )
BUILTIN(ObjectSeal) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index ad70fb1dd1..1339e2dccd 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -2062,7 +2062,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// 5. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
TNode<Object> resolve =
GetProperty(native_context, constructor, factory()->resolve_string());
- GotoIfException(resolve, if_exception, var_exception);
+ GotoIfException(resolve, &close_iterator, var_exception);
// 6. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError*
// exception.
@@ -2077,9 +2077,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// Let next be IteratorStep(iteratorRecord.[[Iterator]]).
// If next is an abrupt completion, set iteratorRecord.[[Done]] to true.
// ReturnIfAbrupt(next).
- Node* const fast_iterator_result_map =
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
- Node* const next = iter_assembler.IteratorStep(
+ TNode<Map> const fast_iterator_result_map = CAST(
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
+ TNode<JSReceiver> const next = iter_assembler.IteratorStep(
native_context, iterator, &done_loop, fast_iterator_result_map,
if_exception, var_exception);
@@ -2087,7 +2087,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to
// true.
// ReturnIfAbrupt(nextValue).
- Node* const next_value = iter_assembler.IteratorValue(
+ TNode<Object> const next_value = iter_assembler.IteratorValue(
native_context, next, fast_iterator_result_map, if_exception,
var_exception);
@@ -2148,7 +2148,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
&if_slow);
GotoIf(IsPromiseSpeciesProtectorCellInvalid(), &if_slow);
GotoIf(TaggedIsSmi(next_value), &if_slow);
- Node* const next_value_map = LoadMap(next_value);
+ Node* const next_value_map = LoadMap(CAST(next_value));
BranchIfPromiseThenLookupChainIntact(native_context, next_value_map,
&if_fast, &if_slow);
@@ -2526,8 +2526,7 @@ TF_BUILTIN(PromiseAllSettledResolveElementClosure, PromiseBuiltinsAssembler) {
LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
TNode<Map> object_function_map = Cast(LoadObjectField(
object_function, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<JSObject> obj =
- Cast(AllocateJSObjectFromMap(object_function_map));
+ TNode<JSObject> obj = AllocateJSObjectFromMap(object_function_map);
// 10. Perform ! CreateDataProperty(obj, "status", "fulfilled").
CallBuiltin(Builtins::kFastCreateDataProperty, context, obj,
@@ -2557,8 +2556,7 @@ TF_BUILTIN(PromiseAllSettledRejectElementClosure, PromiseBuiltinsAssembler) {
LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
TNode<Map> object_function_map = Cast(LoadObjectField(
object_function, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<JSObject> obj =
- Cast(AllocateJSObjectFromMap(object_function_map));
+ TNode<JSObject> obj = AllocateJSObjectFromMap(object_function_map);
// 10. Perform ! CreateDataProperty(obj, "status", "rejected").
CallBuiltin(Builtins::kFastCreateDataProperty, context, obj,
@@ -2579,7 +2577,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
"Promise.race");
@@ -2626,11 +2624,11 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// 3. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
TNode<Object> resolve =
GetProperty(native_context, receiver, factory()->resolve_string());
- GotoIfException(resolve, &reject_promise, &var_exception);
+ GotoIfException(resolve, &close_iterator, &var_exception);
// 4. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError*
// exception.
- ThrowIfNotCallable(CAST(context), resolve, "resolve");
+ ThrowIfNotCallable(context, resolve, "resolve");
var_promise_resolve_function = resolve;
Goto(&loop);
@@ -2638,13 +2636,13 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
BIND(&loop);
{
- Node* const fast_iterator_result_map = LoadContextElement(
- native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ TNode<Map> const fast_iterator_result_map = CAST(LoadContextElement(
+ native_context, Context::ITERATOR_RESULT_MAP_INDEX));
// Let next be IteratorStep(iteratorRecord.[[Iterator]]).
// If next is an abrupt completion, set iteratorRecord.[[Done]] to true.
// ReturnIfAbrupt(next).
- Node* const next = iter_assembler.IteratorStep(
+ TNode<JSReceiver> const next = iter_assembler.IteratorStep(
context, iterator, &break_loop, fast_iterator_result_map,
&reject_promise, &var_exception);
@@ -2652,7 +2650,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to
// true.
// ReturnIfAbrupt(nextValue).
- Node* const next_value =
+ TNode<Object> const next_value =
iter_assembler.IteratorValue(context, next, fast_iterator_result_map,
&reject_promise, &var_exception);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index a1a2f6308f..948540ea5f 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -13,8 +13,9 @@
namespace v8 {
namespace internal {
-Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
- Node* context) {
+compiler::TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy(
+ TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSReceiver> handler) {
VARIABLE(map, MachineRepresentation::kTagged);
Label callable_target(this), constructor_target(this), none_target(this),
@@ -53,7 +54,7 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kTargetOffset, target);
StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHandlerOffset, handler);
- return proxy;
+ return CAST(proxy);
}
Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
@@ -121,8 +122,9 @@ Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext(
return context;
}
-Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy,
- Node* context) {
+compiler::TNode<JSFunction>
+ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(TNode<Context> context,
+ TNode<JSProxy> proxy) {
Node* const native_context = LoadNativeContext(context);
Node* const proxy_context =
@@ -132,13 +134,8 @@ Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy,
Node* const revoke_info =
LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN);
- return AllocateFunctionWithMapAndContext(revoke_map, revoke_info,
- proxy_context);
-}
-
-Node* ProxiesCodeStubAssembler::GetProxyConstructorJSNewTarget() {
- return CodeAssembler::Parameter(static_cast<int>(
- Builtin_ProxyConstructor_InterfaceDescriptor::kJSNewTarget));
+ return CAST(AllocateFunctionWithMapAndContext(revoke_map, revoke_info,
+ proxy_context));
}
TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
@@ -262,9 +259,11 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
{ ThrowTypeError(context, MessageTemplate::kProxyRevoked, "construct"); }
}
-Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult(
- Node* context, Node* target, Node* proxy, Node* name, Node* trap_result,
+void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
+ TNode<Context> context, TNode<JSReceiver> target, TNode<JSProxy> proxy,
+ TNode<Name> name, TNode<Object> trap_result,
JSProxy::AccessKind access_kind) {
+ // TODO(mslekova): Think of a better name for the trap_result param.
Node* map = LoadMap(target);
VARIABLE(var_value, MachineRepresentation::kTagged);
VARIABLE(var_details, MachineRepresentation::kWord32);
@@ -273,7 +272,7 @@ Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult(
Label if_found_value(this), check_in_runtime(this, Label::kDeferred),
check_passed(this);
- GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime);
+ GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
Node* instance_type = LoadInstanceType(target);
TryGetOwnProperty(context, target, target, map, instance_type, name,
&if_found_value, &var_value, &var_details, &var_raw_value,
@@ -366,12 +365,13 @@ Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult(
}
BIND(&check_passed);
- return trap_result;
}
}
-Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
- Node* proxy, Node* name) {
+void ProxiesCodeStubAssembler::CheckHasTrapResult(TNode<Context> context,
+ TNode<JSReceiver> target,
+ TNode<JSProxy> proxy,
+ TNode<Name> name) {
Node* target_map = LoadMap(target);
VARIABLE(var_value, MachineRepresentation::kTagged);
VARIABLE(var_details, MachineRepresentation::kWord32);
@@ -383,7 +383,7 @@ Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
check_in_runtime(this, Label::kDeferred);
// 9.a. Let targetDesc be ? target.[[GetOwnProperty]](P).
- GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime);
+ GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
Node* instance_type = LoadInstanceType(target);
TryGetOwnProperty(context, target, target, target_map, instance_type, name,
&if_found_value, &var_value, &var_details, &var_raw_value,
@@ -419,7 +419,64 @@ Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
}
BIND(&check_passed);
- return FalseConstant();
+}
+
+void ProxiesCodeStubAssembler::CheckDeleteTrapResult(TNode<Context> context,
+ TNode<JSReceiver> target,
+ TNode<JSProxy> proxy,
+ TNode<Name> name) {
+ TNode<Map> target_map = LoadMap(target);
+ TVARIABLE(Object, var_value);
+ TVARIABLE(Uint32T, var_details);
+ TVARIABLE(Object, var_raw_value);
+
+ Label if_found_value(this, Label::kDeferred),
+ throw_non_configurable(this, Label::kDeferred),
+ throw_non_extensible(this, Label::kDeferred), check_passed(this),
+ check_in_runtime(this, Label::kDeferred);
+
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
+ TNode<Int32T> instance_type = LoadInstanceType(target);
+ TryGetOwnProperty(context, target, target, target_map, instance_type, name,
+ &if_found_value, &var_value, &var_details, &var_raw_value,
+ &check_passed, &check_in_runtime, kReturnAccessorPair);
+
+ // 11. If targetDesc is undefined, return true.
+ BIND(&if_found_value);
+ {
+ // 12. If targetDesc.[[Configurable]] is false, throw a TypeError exception.
+ TNode<BoolT> non_configurable = IsSetWord32(
+ var_details.value(), PropertyDetails::kAttributesDontDeleteMask);
+ GotoIf(non_configurable, &throw_non_configurable);
+
+ // 13. Let extensibleTarget be ? IsExtensible(target).
+ TNode<BoolT> target_extensible = IsExtensibleMap(target_map);
+
+ // 14. If extensibleTarget is false, throw a TypeError exception.
+ GotoIfNot(target_extensible, &throw_non_extensible);
+ Goto(&check_passed);
+ }
+
+ BIND(&throw_non_configurable);
+ {
+ ThrowTypeError(context,
+ MessageTemplate::kProxyDeletePropertyNonConfigurable, name);
+ }
+
+ BIND(&throw_non_extensible);
+ {
+ ThrowTypeError(context, MessageTemplate::kProxyDeletePropertyNonExtensible,
+ name);
+ }
+
+ BIND(&check_in_runtime);
+ {
+ CallRuntime(Runtime::kCheckProxyDeleteTrapResult, context, name, target);
+ Goto(&check_passed);
+ }
+
+ BIND(&check_passed);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h
index fcaac7df66..cb51faf575 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.h
+++ b/deps/v8/src/builtins/builtins-proxy-gen.h
@@ -17,19 +17,21 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
explicit ProxiesCodeStubAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* AllocateProxy(Node* target, Node* handler, Node* context);
- Node* AllocateProxyRevokeFunction(Node* proxy, Node* context);
+ TNode<JSProxy> AllocateProxy(TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSReceiver> handler);
+ TNode<JSFunction> AllocateProxyRevokeFunction(TNode<Context> context,
+ TNode<JSProxy> proxy);
- // Get JSNewTarget parameter for ProxyConstructor builtin (Torque).
- // TODO(v8:9120): Remove this once torque support exists
- Node* GetProxyConstructorJSNewTarget();
+ void CheckGetSetTrapResult(TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSProxy> proxy, TNode<Name> name,
+ TNode<Object> trap_result,
+ JSProxy::AccessKind access_kind);
- Node* CheckGetSetTrapResult(Node* context, Node* target, Node* proxy,
- Node* name, Node* trap_result,
- JSProxy::AccessKind access_kind);
+ void CheckHasTrapResult(TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSProxy> proxy, TNode<Name> name);
- Node* CheckHasTrapResult(Node* context, Node* target, Node* proxy,
- Node* name);
+ void CheckDeleteTrapResult(TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSProxy> proxy, TNode<Name> name);
protected:
enum ProxyRevokeFunctionContextSlot {
@@ -37,9 +39,10 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
kProxyContextLength,
};
- Node* AllocateJSArrayForCodeStubArguments(Node* context,
- CodeStubArguments& args, Node* argc,
- ParameterMode mode);
+ Node* AllocateJSArrayForCodeStubArguments(
+ Node* context,
+ CodeStubArguments& args, // NOLINT(runtime/references)
+ Node* argc, ParameterMode mode);
private:
Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context);
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index e998652dad..6151fcbd47 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -46,53 +46,6 @@ BUILTIN(ReflectDefineProperty) {
return *isolate->factory()->ToBoolean(result.FromJust());
}
-// ES6 section 26.1.4 Reflect.deleteProperty
-BUILTIN(ReflectDeleteProperty) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at(1);
- Handle<Object> key = args.at(2);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.deleteProperty")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- Maybe<bool> result = JSReceiver::DeletePropertyOrElement(
- Handle<JSReceiver>::cast(target), name, LanguageMode::kSloppy);
- MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
-// ES6 section 26.1.6 Reflect.get
-BUILTIN(ReflectGet) {
- HandleScope scope(isolate);
- Handle<Object> target = args.atOrUndefined(isolate, 1);
- Handle<Object> key = args.atOrUndefined(isolate, 2);
- Handle<Object> receiver = args.length() > 3 ? args.at(3) : target;
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.get")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- RETURN_RESULT_OR_FAILURE(
- isolate, Object::GetPropertyOrElement(receiver, name,
- Handle<JSReceiver>::cast(target)));
-}
-
// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
BUILTIN(ReflectGetOwnPropertyDescriptor) {
HandleScope scope(isolate);
@@ -119,42 +72,6 @@ BUILTIN(ReflectGetOwnPropertyDescriptor) {
return *desc.ToObject(isolate);
}
-// ES6 section 26.1.8 Reflect.getPrototypeOf
-BUILTIN(ReflectGetPrototypeOf) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at(1);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.getPrototypeOf")));
- }
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(target);
- RETURN_RESULT_OR_FAILURE(isolate,
- JSReceiver::GetPrototype(isolate, receiver));
-}
-
-// ES6 section 26.1.10 Reflect.isExtensible
-BUILTIN(ReflectIsExtensible) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at(1);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.isExtensible")));
- }
-
- Maybe<bool> result =
- JSReceiver::IsExtensible(Handle<JSReceiver>::cast(target));
- MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
// ES6 section 26.1.11 Reflect.ownKeys
BUILTIN(ReflectOwnKeys) {
HandleScope scope(isolate);
@@ -177,25 +94,6 @@ BUILTIN(ReflectOwnKeys) {
return *isolate->factory()->NewJSArrayWithElements(keys);
}
-// ES6 section 26.1.12 Reflect.preventExtensions
-BUILTIN(ReflectPreventExtensions) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at(1);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.preventExtensions")));
- }
-
- Maybe<bool> result = JSReceiver::PreventExtensions(
- Handle<JSReceiver>::cast(target), kDontThrow);
- MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
// ES6 section 26.1.13 Reflect.set
BUILTIN(ReflectSet) {
HandleScope scope(isolate);
@@ -223,30 +121,5 @@ BUILTIN(ReflectSet) {
return *isolate->factory()->ToBoolean(result.FromJust());
}
-// ES6 section 26.1.14 Reflect.setPrototypeOf
-BUILTIN(ReflectSetPrototypeOf) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at(1);
- Handle<Object> proto = args.at(2);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.setPrototypeOf")));
- }
-
- if (!proto->IsJSReceiver() && !proto->IsNull(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
- }
-
- Maybe<bool> result = JSReceiver::SetPrototype(
- Handle<JSReceiver>::cast(target), proto, true, kDontThrow);
- MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 51ee2796e6..d53518ff7e 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -15,7 +15,7 @@
#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#include "src/objects/regexp-match-info.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
@@ -94,12 +94,12 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpCreate(TNode<Context> context,
TNode<String> pattern = Select<String>(
IsUndefined(maybe_string), [=] { return EmptyStringConstant(); },
[=] { return ToString_Inline(context, maybe_string); });
- TNode<Object> regexp = CAST(AllocateJSObjectFromMap(initial_map));
+ TNode<JSObject> regexp = AllocateJSObjectFromMap(initial_map);
return CallRuntime(Runtime::kRegExpInitializeAndCompile, context, regexp,
pattern, flags);
}
-TNode<Object> RegExpBuiltinsAssembler::FastLoadLastIndex(
+TNode<Object> RegExpBuiltinsAssembler::FastLoadLastIndexBeforeSmiCheck(
TNode<JSRegExp> regexp) {
// Load the in-object field.
static const int field_offset =
@@ -121,23 +121,27 @@ TNode<Object> RegExpBuiltinsAssembler::LoadLastIndex(TNode<Context> context,
// The fast-path of StoreLastIndex when regexp is guaranteed to be an unmodified
// JSRegExp instance.
-void RegExpBuiltinsAssembler::FastStoreLastIndex(Node* regexp, Node* value) {
+void RegExpBuiltinsAssembler::FastStoreLastIndex(TNode<JSRegExp> regexp,
+ TNode<Smi> value) {
// Store the in-object field.
static const int field_offset =
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kTaggedSize;
StoreObjectField(regexp, field_offset, value);
}
-void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp,
- Node* value) {
- Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
- SetPropertyStrict(CAST(context), CAST(regexp), CAST(name), CAST(value));
+void RegExpBuiltinsAssembler::SlowStoreLastIndex(SloppyTNode<Context> context,
+ SloppyTNode<Object> regexp,
+ SloppyTNode<Number> value) {
+ TNode<Name> name = HeapConstant(isolate()->factory()->lastIndex_string());
+ SetPropertyStrict(context, regexp, name, value);
}
-void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
- Node* value, bool is_fastpath) {
+void RegExpBuiltinsAssembler::StoreLastIndex(TNode<Context> context,
+ TNode<Object> regexp,
+ TNode<Number> value,
+ bool is_fastpath) {
if (is_fastpath) {
- FastStoreLastIndex(regexp, value);
+ FastStoreLastIndex(CAST(regexp), CAST(value));
} else {
SlowStoreLastIndex(context, regexp, value);
}
@@ -248,10 +252,10 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<Context> native_context = LoadNativeContext(context);
TNode<Map> map = CAST(LoadContextElement(
native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
- TNode<NameDictionary> properties = AllocateNameDictionary(num_properties);
+ TNode<NameDictionary> properties =
+ AllocateNameDictionary(num_properties, kAllowLargeObjectAllocation);
- TNode<JSObject> group_object =
- CAST(AllocateJSObjectFromMap(map, properties));
+ TNode<JSObject> group_object = AllocateJSObjectFromMap(map, properties);
StoreObjectField(result, JSRegExpResult::kGroupsOffset, group_object);
TVARIABLE(IntPtrT, var_i, IntPtrZero());
@@ -534,19 +538,18 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
// We expect exactly one result since we force the called regexp to behave
// as non-global.
TNode<IntPtrT> int_result = ChangeInt32ToIntPtr(result);
+ GotoIf(
+ IntPtrEqual(int_result, IntPtrConstant(RegExp::kInternalRegExpSuccess)),
+ &if_success);
+ GotoIf(
+ IntPtrEqual(int_result, IntPtrConstant(RegExp::kInternalRegExpFailure)),
+ &if_failure);
GotoIf(IntPtrEqual(int_result,
- IntPtrConstant(NativeRegExpMacroAssembler::SUCCESS)),
- &if_success);
- GotoIf(IntPtrEqual(int_result,
- IntPtrConstant(NativeRegExpMacroAssembler::FAILURE)),
- &if_failure);
- GotoIf(IntPtrEqual(int_result,
- IntPtrConstant(NativeRegExpMacroAssembler::EXCEPTION)),
+ IntPtrConstant(RegExp::kInternalRegExpException)),
&if_exception);
- CSA_ASSERT(this,
- IntPtrEqual(int_result,
- IntPtrConstant(NativeRegExpMacroAssembler::RETRY)));
+ CSA_ASSERT(this, IntPtrEqual(int_result,
+ IntPtrConstant(RegExp::kInternalRegExpRetry)));
Goto(&runtime);
}
@@ -755,7 +758,7 @@ RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
GotoIfNot(should_update_last_index, &out);
// Update the new last index from {match_indices}.
- TNode<Number> new_lastindex = CAST(UnsafeLoadFixedArrayElement(
+ TNode<Smi> new_lastindex = CAST(UnsafeLoadFixedArrayElement(
CAST(match_indices), RegExpMatchInfo::kFirstCaptureIndex + 1));
StoreLastIndex(context, regexp, new_lastindex, is_fastpath);
@@ -852,7 +855,7 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
- Node* const last_index = FastLoadLastIndex(CAST(object));
+ TNode<Object> last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object));
var_result.Bind(TaggedIsPositiveSmi(last_index));
Goto(&out);
@@ -897,7 +900,7 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec(
BIND(&check_last_index);
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
- TNode<Object> last_index = FastLoadLastIndex(object);
+ TNode<Object> last_index = FastLoadLastIndexBeforeSmiCheck(object);
var_result = TaggedIsPositiveSmi(last_index);
Goto(&out);
@@ -925,9 +928,9 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(
// This should only be needed for String.p.(split||matchAll), but we are
// conservative here.
- GotoIf(IsRegExpSpeciesProtectorCellInvalid(), if_ismodified);
+ TNode<Context> native_context = LoadNativeContext(context);
+ GotoIf(IsRegExpSpeciesProtectorCellInvalid(native_context), if_ismodified);
- Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
Node* const initial_map =
@@ -954,7 +957,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
- Node* const last_index = FastLoadLastIndex(CAST(object));
+ TNode<Object> last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object));
Branch(TaggedIsPositiveSmi(last_index), if_isunmodified, if_ismodified);
}
@@ -1012,7 +1015,7 @@ TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) {
// Fast path stub for ATOM regexps. String matching is done by StringIndexOf,
// and {match_info} is updated on success.
-// The slow path is implemented in RegExpImpl::AtomExec.
+// The slow path is implemented in RegExp::AtomExec.
TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
TNode<String> subject_string = CAST(Parameter(Descriptor::kString));
@@ -1538,7 +1541,8 @@ TNode<Int32T> RegExpBuiltinsAssembler::FastFlagGetter(TNode<JSRegExp> regexp,
JSRegExp::Flag flag) {
TNode<Smi> flags = CAST(LoadObjectField(regexp, JSRegExp::kFlagsOffset));
TNode<Smi> mask = SmiConstant(flag);
- return SmiToInt32(SmiShr(SmiAnd(flags, mask), JSRegExp::FlagShiftBits(flag)));
+ return SmiToInt32(SmiShr(SmiAnd(flags, mask), base::bits::CountTrailingZeros(
+ static_cast<int>(flag))));
}
// Load through the GetProperty stub.
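For reference (not part of the patch): the new shift amount is just the bit position of the flag's mask, so the getter still extracts a single 0/1 bit. A minimal standalone C++ sketch of that arithmetic, using made-up mask values rather than the real JSRegExp flag layout:

#include <cassert>
#include <cstdint>

// Software count-trailing-zeros, standing in for base::bits::CountTrailingZeros.
static int CountTrailingZeros(uint32_t value) {
  int n = 0;
  while ((value & 1u) == 0u) { value >>= 1u; ++n; }
  return n;
}

int main() {
  const uint32_t kGlobal = 1u << 0;   // assumed bit positions, illustration only
  const uint32_t kUnicode = 1u << 4;
  uint32_t flags = kUnicode;          // a regexp created with only /u set
  // (flags & mask) >> CountTrailingZeros(mask) yields 0 or 1 for that flag.
  assert(((flags & kGlobal) >> CountTrailingZeros(kGlobal)) == 0u);
  assert(((flags & kUnicode) >> CountTrailingZeros(kUnicode)) == 1u);
  return 0;
}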
@@ -1807,10 +1811,9 @@ TF_BUILTIN(RegExpPrototypeTestFast, RegExpBuiltinsAssembler) {
Return(FalseConstant());
}
-Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
- Node* const index,
- Node* const is_unicode,
- bool is_fastpath) {
+TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
+ SloppyTNode<String> string, SloppyTNode<Number> index,
+ SloppyTNode<BoolT> is_unicode, bool is_fastpath) {
CSA_ASSERT(this, IsString(string));
CSA_ASSERT(this, IsNumberNormalized(index));
if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index));
@@ -1818,8 +1821,8 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
// Default to last_index + 1.
// TODO(pwong): Consider using TrySmiAdd for the fast path to reduce generated
// code.
- Node* const index_plus_one = NumberInc(index);
- VARIABLE(var_result, MachineRepresentation::kTagged, index_plus_one);
+ TNode<Number> index_plus_one = NumberInc(index);
+ TVARIABLE(Number, var_result, index_plus_one);
// Advancing the index has some subtle issues involving the distinction
// between Smis and HeapNumbers. There's three cases:
@@ -1846,10 +1849,10 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
BIND(&if_isunicode);
{
TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
- TNode<IntPtrT> untagged_plus_one = SmiUntag(index_plus_one);
+ TNode<IntPtrT> untagged_plus_one = SmiUntag(CAST(index_plus_one));
GotoIfNot(IntPtrLessThan(untagged_plus_one, string_length), &out);
- Node* const lead = StringCharCodeAt(string, SmiUntag(index));
+ Node* const lead = StringCharCodeAt(string, SmiUntag(CAST(index)));
GotoIfNot(Word32Equal(Word32And(lead, Int32Constant(0xFC00)),
Int32Constant(0xD800)),
&out);
@@ -1860,8 +1863,8 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
&out);
// At a surrogate pair, return index + 2.
- Node* const index_plus_two = NumberInc(index_plus_one);
- var_result.Bind(index_plus_two);
+ TNode<Number> index_plus_two = NumberInc(index_plus_one);
+ var_result = index_plus_two;
Goto(&out);
}
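A minimal standalone C++ sketch (illustration only, not V8 code) of the advance rule implemented above: move by two code units when the current position holds a lead surrogate ((lead & 0xFC00) == 0xD800) followed by a trail surrogate ((trail & 0xFC00) == 0xDC00), otherwise by one.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Next index into a UTF-16 code unit sequence under /u (unicode) semantics.
size_t AdvanceStringIndex(const std::vector<uint16_t>& units, size_t index) {
  size_t next = index + 1;
  if (next < units.size()) {
    uint16_t lead = units[index];
    uint16_t trail = units[next];
    // Same masks as the CSA code: lead in [0xD800, 0xDBFF] and
    // trail in [0xDC00, 0xDFFF] means we sit on a surrogate pair.
    if ((lead & 0xFC00) == 0xD800 && (trail & 0xFC00) == 0xDC00) {
      return index + 2;
    }
  }
  return next;
}

int main() {
  std::vector<uint16_t> s = {0xD83D, 0xDE00, 0x0041};  // U+1F600 followed by 'A'
  assert(AdvanceStringIndex(s, 0) == 2);  // skips the whole surrogate pair
  assert(AdvanceStringIndex(s, 2) == 3);  // plain BMP code unit advances by one
  return 0;
}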
@@ -1870,31 +1873,30 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
return var_result.value();
}
-void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
- Node* const regexp,
+void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(TNode<Context> context,
+ TNode<Object> regexp,
TNode<String> string,
const bool is_fastpath) {
if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp));
Node* const is_global =
- FlagGetter(CAST(context), CAST(regexp), JSRegExp::kGlobal, is_fastpath);
+ FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
Label if_isglobal(this), if_isnotglobal(this);
Branch(is_global, &if_isglobal, &if_isnotglobal);
BIND(&if_isnotglobal);
{
- Node* const result =
- is_fastpath
- ? RegExpPrototypeExecBody(CAST(context), CAST(regexp), string, true)
- : RegExpExec(context, regexp, string);
+ Node* const result = is_fastpath ? RegExpPrototypeExecBody(
+ context, CAST(regexp), string, true)
+ : RegExpExec(context, regexp, string);
Return(result);
}
BIND(&if_isglobal);
{
- Node* const is_unicode = FlagGetter(CAST(context), CAST(regexp),
- JSRegExp::kUnicode, is_fastpath);
+ Node* const is_unicode =
+ FlagGetter(context, regexp, JSRegExp::kUnicode, is_fastpath);
StoreLastIndex(context, regexp, SmiZero(), is_fastpath);
@@ -1935,8 +1937,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// On the fast path, grab the matching string from the raw match index
// array.
TNode<RegExpMatchInfo> match_indices =
- RegExpPrototypeExecBodyWithoutResult(CAST(context), CAST(regexp),
- string, &if_didnotmatch, true);
+ RegExpPrototypeExecBodyWithoutResult(context, CAST(regexp), string,
+ &if_didnotmatch, true);
Label dosubstring(this), donotsubstring(this);
Branch(var_atom.value(), &donotsubstring, &dosubstring);
@@ -1988,15 +1990,14 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
TNode<Smi> const match_length = LoadStringLengthAsSmi(match);
GotoIfNot(SmiEqual(match_length, SmiZero()), &loop);
- Node* last_index =
- LoadLastIndex(CAST(context), CAST(regexp), is_fastpath);
+ Node* last_index = LoadLastIndex(context, regexp, is_fastpath);
if (is_fastpath) {
CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
} else {
last_index = ToLength_Inline(context, last_index);
}
- Node* const new_last_index =
+ TNode<Number> new_last_index =
AdvanceStringIndex(string, last_index, is_unicode, is_fastpath);
if (is_fastpath) {
@@ -2017,7 +2018,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
{
// Wrap the match in a JSArray.
- Node* const result = array.ToJSArray(CAST(context));
+ Node* const result = array.ToJSArray(context);
Return(result);
}
}
@@ -2034,7 +2035,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
ThrowIfNotJSReceiver(context, maybe_receiver,
MessageTemplate::kIncompatibleMethodReceiver,
"RegExp.prototype.@@match");
- Node* const receiver = maybe_receiver;
+ TNode<JSReceiver> receiver = CAST(maybe_receiver);
// Convert {maybe_string} to a String.
TNode<String> const string = ToString_Inline(context, maybe_string);
@@ -2086,7 +2087,8 @@ void RegExpMatchAllAssembler::Generate(TNode<Context> context,
// 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
// 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
- FastStoreLastIndex(var_matcher.value(), FastLoadLastIndex(fast_regexp));
+ FastStoreLastIndex(CAST(var_matcher.value()),
+ FastLoadLastIndex(fast_regexp));
// 9. If flags contains "g", let global be true.
// 10. Else, let global be false.
@@ -2226,12 +2228,11 @@ TF_BUILTIN(RegExpMatchFast, RegExpBuiltinsAssembler) {
}
void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
- Node* const context, Node* const regexp, Node* const string) {
+ TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string) {
CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(string));
// Grab the initial value of last index.
- Node* const previous_last_index = FastLoadLastIndex(CAST(regexp));
+ TNode<Smi> previous_last_index = FastLoadLastIndex(regexp);
// Ensure last index is 0.
FastStoreLastIndex(regexp, SmiZero());
@@ -2239,7 +2240,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
// Call exec.
Label if_didnotmatch(this);
TNode<RegExpMatchInfo> match_indices = RegExpPrototypeExecBodyWithoutResult(
- CAST(context), CAST(regexp), CAST(string), &if_didnotmatch, true);
+ context, regexp, string, &if_didnotmatch, true);
// Successful match.
{
@@ -2839,16 +2840,14 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
GotoIfNot(IsEmptyString(match_str), &return_result);
// 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
- TNode<Smi> this_index = CAST(FastLoadLastIndex(CAST(iterating_regexp)));
- CSA_ASSERT(this, TaggedIsSmi(this_index));
+ TNode<Smi> this_index = FastLoadLastIndex(CAST(iterating_regexp));
// 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode).
- TNode<Smi> next_index = CAST(AdvanceStringIndex(
- iterating_string, this_index, HasUnicodeFlag(flags), true));
- CSA_ASSERT(this, TaggedIsSmi(next_index));
+ TNode<Smi> next_index = AdvanceStringIndexFast(
+ iterating_string, this_index, HasUnicodeFlag(flags));
// 3. Perform ? Set(R, "lastIndex", nextIndex, true).
- FastStoreLastIndex(iterating_regexp, next_index);
+ FastStoreLastIndex(CAST(iterating_regexp), next_index);
// iii. Return ! CreateIterResultObject(match, false).
Goto(&return_result);
@@ -2866,8 +2865,8 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
TNode<Number> this_index = ToLength_Inline(context, last_index);
// 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode).
- TNode<Object> next_index = CAST(AdvanceStringIndex(
- iterating_string, this_index, HasUnicodeFlag(flags), false));
+ TNode<Number> next_index = AdvanceStringIndex(
+ iterating_string, this_index, HasUnicodeFlag(flags), false);
// 3. Perform ? Set(R, "lastIndex", nextIndex, true).
SlowStoreLastIndex(context, iterating_regexp, next_index);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index 88c00095b9..3677314f19 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -7,7 +7,7 @@
#include "src/base/optional.h"
#include "src/codegen/code-stub-assembler.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
namespace v8 {
namespace internal {
@@ -42,15 +42,20 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
TNode<String> input, TNode<FixedArray>* elements_out = nullptr);
- TNode<Object> FastLoadLastIndex(TNode<JSRegExp> regexp);
+ TNode<Object> FastLoadLastIndexBeforeSmiCheck(TNode<JSRegExp> regexp);
+ TNode<Smi> FastLoadLastIndex(TNode<JSRegExp> regexp) {
+ return CAST(FastLoadLastIndexBeforeSmiCheck(regexp));
+ }
TNode<Object> SlowLoadLastIndex(TNode<Context> context, TNode<Object> regexp);
TNode<Object> LoadLastIndex(TNode<Context> context, TNode<Object> regexp,
bool is_fastpath);
- void FastStoreLastIndex(Node* regexp, Node* value);
- void SlowStoreLastIndex(Node* context, Node* regexp, Node* value);
- void StoreLastIndex(Node* context, Node* regexp, Node* value,
- bool is_fastpath);
+ void FastStoreLastIndex(TNode<JSRegExp> regexp, TNode<Smi> value);
+ void SlowStoreLastIndex(SloppyTNode<Context> context,
+ SloppyTNode<Object> regexp,
+ SloppyTNode<Number> value);
+ void StoreLastIndex(TNode<Context> context, TNode<Object> regexp,
+ TNode<Number> value, bool is_fastpath);
// Loads {var_string_start} and {var_string_end} with the corresponding
// offsets into the given {string_data}.
@@ -127,20 +132,23 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* RegExpExec(Node* context, Node* regexp, Node* string);
- Node* AdvanceStringIndex(Node* const string, Node* const index,
- Node* const is_unicode, bool is_fastpath);
+ TNode<Number> AdvanceStringIndex(SloppyTNode<String> string,
+ SloppyTNode<Number> index,
+ SloppyTNode<BoolT> is_unicode,
+ bool is_fastpath);
- Node* AdvanceStringIndexFast(Node* const string, Node* const index,
- Node* const is_unicode) {
- return AdvanceStringIndex(string, index, is_unicode, true);
+ TNode<Smi> AdvanceStringIndexFast(TNode<String> string, TNode<Smi> index,
+ TNode<BoolT> is_unicode) {
+ return CAST(AdvanceStringIndex(string, index, is_unicode, true));
}
- void RegExpPrototypeMatchBody(Node* const context, Node* const regexp,
+ void RegExpPrototypeMatchBody(TNode<Context> context, TNode<Object> regexp,
TNode<String> const string,
const bool is_fastpath);
- void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp,
- Node* const string);
+ void RegExpPrototypeSearchBodyFast(TNode<Context> context,
+ TNode<JSRegExp> regexp,
+ TNode<String> string);
void RegExpPrototypeSearchBodySlow(Node* const context, Node* const regexp,
Node* const string);
diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc
index 3e0f7182c7..e758782a99 100644
--- a/deps/v8/src/builtins/builtins-regexp.cc
+++ b/deps/v8/src/builtins/builtins-regexp.cc
@@ -6,8 +6,8 @@
#include "src/builtins/builtins.h"
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-utils.h"
+#include "src/regexp/regexp.h"
#include "src/strings/string-builder-inl.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 5689b42619..97dc8ca895 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -545,32 +545,33 @@ TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
Return(result);
}
-TF_BUILTIN(StringCodePointAtUTF16, StringBuiltinsAssembler) {
+TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
+
// TODO(sigurds) Figure out if passing length as argument pays off.
TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
// Load the character code at the {position} from the {receiver}.
TNode<Int32T> code =
- LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16);
+ LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
TNode<Smi> result = SmiFromInt32(code);
Return(result);
}
-TF_BUILTIN(StringCodePointAtUTF32, StringBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* position = Parameter(Descriptor::kPosition);
+TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) {
+ TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<IntPtrT> position =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kPosition));
// TODO(sigurds) Figure out if passing length as argument pays off.
TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
// Load the character code at the {position} from the {receiver}.
TNode<Int32T> code =
- LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
- // And return it as TaggedSigned value.
- // TODO(turbofan): Allow builtins to return values untagged.
- TNode<Smi> result = SmiFromInt32(code);
+ LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16);
+ // Create a String from the UTF16 encoded code point
+ TNode<String> result = StringFromSingleUTF16EncodedCodePoint(code);
Return(result);
}
@@ -952,19 +953,6 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant,
}
}
-void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
- Node* const value,
- const char* method_name) {
- Label out(this), throw_exception(this, Label::kDeferred);
- Branch(IsNullOrUndefined(value), &throw_exception, &out);
-
- BIND(&throw_exception);
- ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
- method_name);
-
- BIND(&out);
-}
-
void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
Node* const context, Node* const object, Node* const maybe_string,
Handle<Symbol> symbol, DescriptorIndexAndName symbol_index,
@@ -1072,10 +1060,10 @@ compiler::Node* StringBuiltinsAssembler::GetSubstitution(
TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Label out(this);
- Node* const receiver = Parameter(Descriptor::kReceiver);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* const search = Parameter(Descriptor::kSearch);
Node* const replace = Parameter(Descriptor::kReplace);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Smi> const smi_zero = SmiConstant(0);
@@ -1578,7 +1566,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* const receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* const separator = args.GetOptionalArgumentValue(kSeparatorArg);
Node* const limit = args.GetOptionalArgumentValue(kLimitArg);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -1986,12 +1974,12 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
switch (encoding) {
case UnicodeEncoding::UTF16:
- var_result = Signed(Word32Or(
+ var_result = Word32Or(
// Need to swap the order for big-endian platforms
#if V8_TARGET_BIG_ENDIAN
- Word32Shl(lead, Int32Constant(16)), trail));
+ Word32Shl(lead, Int32Constant(16)), trail);
#else
- Word32Shl(trail, Int32Constant(16)), lead));
+ Word32Shl(trail, Int32Constant(16)), lead);
#endif
break;
@@ -2002,8 +1990,8 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
// (lead << 10) + trail + SURROGATE_OFFSET
- var_result = Signed(Int32Add(Word32Shl(lead, Int32Constant(10)),
- Int32Add(trail, surrogate_offset)));
+ var_result = Int32Add(Word32Shl(lead, Int32Constant(10)),
+ Int32Add(trail, surrogate_offset));
break;
}
}
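A quick standalone check (illustration only, not part of the patch) that the SURROGATE_OFFSET constant above recombines a UTF-16 surrogate pair into the expected code point:

#include <cassert>
#include <cstdint>

int main() {
  // Same constant as the CSA code: 0x10000 - (0xD800 << 10) - 0xDC00.
  const int32_t kSurrogateOffset = 0x10000 - (0xD800 << 10) - 0xDC00;
  const int32_t lead = 0xD83D;   // high surrogate of U+1F600
  const int32_t trail = 0xDE00;  // low surrogate of U+1F600
  const int32_t code_point = (lead << 10) + trail + kSurrogateOffset;
  assert(code_point == 0x1F600);
  return 0;
}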
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 92ebd3803b..679ce0e17f 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -76,9 +76,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
TNode<Smi> subject_length,
TNode<Number> limit_number);
- void RequireObjectCoercible(Node* const context, Node* const value,
- const char* method_name);
-
TNode<BoolT> SmiIsNegative(TNode<Smi> value) {
return SmiLessThan(value, SmiConstant(0));
}
diff --git a/deps/v8/src/builtins/builtins-symbol-gen.cc b/deps/v8/src/builtins/builtins-symbol-gen.cc
index 4e8c9f9850..610a8baeb3 100644
--- a/deps/v8/src/builtins/builtins-symbol-gen.cc
+++ b/deps/v8/src/builtins/builtins-symbol-gen.cc
@@ -13,8 +13,8 @@ namespace internal {
// ES #sec-symbol-objects
// ES #sec-symbol.prototype.description
TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.description");
@@ -24,8 +24,8 @@ TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) {
// ES6 #sec-symbol.prototype-@@toprimitive
TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype [ @@toPrimitive ]");
@@ -34,8 +34,8 @@ TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) {
// ES6 #sec-symbol.prototype.tostring
TF_BUILTIN(SymbolPrototypeToString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.toString");
@@ -45,8 +45,8 @@ TF_BUILTIN(SymbolPrototypeToString, CodeStubAssembler) {
// ES6 #sec-symbol.prototype.valueof
TF_BUILTIN(SymbolPrototypeValueOf, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.valueOf");
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 8484685a6a..857d33988f 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -18,32 +18,12 @@ using compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
-// This is needed for gc_mole which will compile this file without the full set
-// of GN defined macros.
-#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64
-#endif
-
// -----------------------------------------------------------------------------
// ES6 section 22.2 TypedArray Objects
-// Setup the TypedArray which is under construction.
-// - Set the length.
-// - Set the byte_offset.
-// - Set the byte_length.
-// - Set EmbedderFields to 0.
-void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
- TNode<UintPtrT> length,
- TNode<UintPtrT> byte_offset,
- TNode<UintPtrT> byte_length) {
- StoreObjectFieldNoWriteBarrier(holder, JSTypedArray::kLengthOffset, length,
- MachineType::PointerRepresentation());
- StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteOffsetOffset,
- byte_offset,
- MachineType::PointerRepresentation());
- StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteLengthOffset,
- byte_length,
- MachineType::PointerRepresentation());
+// Sets the embedder fields to 0 for a TypedArray which is under construction.
+void TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
+ TNode<JSTypedArray> holder) {
for (int offset = JSTypedArray::kHeaderSize;
offset < JSTypedArray::kSizeWithEmbedderFields; offset += kTaggedSize) {
StoreObjectField(holder, offset, SmiConstant(0));
@@ -54,8 +34,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
// elements.
// TODO(bmeurer,v8:4153): Rename this and maybe fix up the implementation a bit.
TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
- TNode<Context> context, TNode<JSTypedArray> holder,
- TNode<UintPtrT> byte_length) {
+ TNode<Context> context, TNode<UintPtrT> byte_length) {
TNode<Context> native_context = LoadNativeContext(context);
TNode<Map> map =
CAST(LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX));
@@ -97,16 +76,6 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
offset < JSArrayBuffer::kSizeWithEmbedderFields; offset += kTaggedSize) {
StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0));
}
-
- StoreObjectField(holder, JSTypedArray::kBufferOffset, buffer);
-
- TNode<ByteArray> elements = AllocateByteArray(byte_length);
- StoreObjectField(holder, JSTypedArray::kElementsOffset, elements);
- StoreObjectField(holder, JSTypedArray::kBasePointerOffset, elements);
- StoreObjectFieldNoWriteBarrier(
- holder, JSTypedArray::kExternalPointerOffset,
- PointerConstant(JSTypedArray::ExternalPointerForOnHeapArray()),
- MachineType::PointerRepresentation());
return buffer;
}
@@ -200,13 +169,13 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
Return(ChangeUintPtrToTagged(length));
}
-TNode<Word32T> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
+TNode<BoolT> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
TNode<Word32T> kind) {
return Word32Or(Word32Equal(kind, Int32Constant(UINT8_ELEMENTS)),
Word32Equal(kind, Int32Constant(UINT8_CLAMPED_ELEMENTS)));
}
-TNode<Word32T> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
+TNode<BoolT> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
TNode<Word32T> kind) {
return Word32Or(Word32Equal(kind, Int32Constant(BIGINT64_ELEMENTS)),
Word32Equal(kind, Int32Constant(BIGUINT64_ELEMENTS)));
@@ -228,7 +197,12 @@ TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
TorqueStructTypedArrayElementsInfo
TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
TNode<JSTypedArray> typed_array) {
- TNode<Int32T> elements_kind = LoadElementsKind(typed_array);
+ return GetTypedArrayElementsInfo(LoadMap(typed_array));
+}
+
+TorqueStructTypedArrayElementsInfo
+TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(TNode<Map> map) {
+ TNode<Int32T> elements_kind = LoadMapElementsKind(map);
TVARIABLE(UintPtrT, var_size_log2);
TVARIABLE(Map, var_map);
ReadOnlyRoots roots(isolate());
@@ -294,10 +268,9 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::GetBuffer(
Label call_runtime(this), done(this);
TVARIABLE(Object, var_result);
- TNode<Object> buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
GotoIf(IsDetachedBuffer(buffer), &call_runtime);
- TNode<UintPtrT> backing_store = LoadObjectField<UintPtrT>(
- CAST(buffer), JSArrayBuffer::kBackingStoreOffset);
+ TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(buffer);
GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime);
var_result = buffer;
Goto(&done);
@@ -327,10 +300,10 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
TNode<Context> context, TNode<JSTypedArray> source,
TNode<JSTypedArray> target, TNode<IntPtrT> offset, Label* call_runtime,
Label* if_source_too_large) {
- CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(
- LoadObjectField(source, JSTypedArray::kBufferOffset))));
- CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(
- LoadObjectField(target, JSTypedArray::kBufferOffset))));
+ CSA_ASSERT(this, Word32BinaryNot(
+ IsDetachedBuffer(LoadJSArrayBufferViewBuffer(source))));
+ CSA_ASSERT(this, Word32BinaryNot(
+ IsDetachedBuffer(LoadJSArrayBufferViewBuffer(target))));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(offset, IntPtrConstant(0)));
CSA_ASSERT(this,
IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue)));
@@ -774,8 +747,8 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
// ToNumber/ToBigInt may execute JavaScript code, which could
// detach the array's buffer.
- Node* buffer =
- LoadObjectField(new_typed_array, JSTypedArray::kBufferOffset);
+ TNode<JSArrayBuffer> buffer =
+ LoadJSArrayBufferViewBuffer(new_typed_array);
GotoIf(IsDetachedBuffer(buffer), &if_detached);
// GC may move backing store in ToNumber, thus load backing
@@ -997,8 +970,8 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
// ToNumber/ToBigInt may execute JavaScript code, which could
// detach the array's buffer.
- Node* buffer = LoadObjectField(target_obj.value(),
- JSTypedArray::kBufferOffset);
+ TNode<JSArrayBuffer> buffer =
+ LoadJSArrayBufferViewBuffer(target_obj.value());
GotoIf(IsDetachedBuffer(buffer), &if_detached);
// GC may move backing store in map_fn, thus load backing
@@ -1027,7 +1000,5 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
"%TypedArray%.from");
}
-#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 6fb02a657c..d637bc9c6b 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -27,15 +27,12 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
const char* method_name,
IterationKind iteration_kind);
- void SetupTypedArray(TNode<JSTypedArray> holder, TNode<UintPtrT> length,
- TNode<UintPtrT> byte_offset,
- TNode<UintPtrT> byte_length);
+ void SetupTypedArrayEmbedderFields(TNode<JSTypedArray> holder);
void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
TNode<Map> map, TNode<Smi> length,
TNode<UintPtrT> byte_offset);
TNode<JSArrayBuffer> AllocateEmptyOnHeapBuffer(TNode<Context> context,
- TNode<JSTypedArray> holder,
TNode<UintPtrT> byte_length);
TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
@@ -44,16 +41,17 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<UintPtrT> byte_offset);
// Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
- TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
+ TNode<BoolT> IsUint8ElementsKind(TNode<Word32T> kind);
// Returns true if kind is either BIGINT64_ELEMENTS or BIGUINT64_ELEMENTS.
- TNode<Word32T> IsBigInt64ElementsKind(TNode<Word32T> kind);
+ TNode<BoolT> IsBigInt64ElementsKind(TNode<Word32T> kind);
// Returns the byte size of an element for a TypedArray elements kind.
TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
// Returns information (byte size and map) about a TypedArray's elements.
ElementsInfo GetTypedArrayElementsInfo(TNode<JSTypedArray> typed_array);
+ ElementsInfo GetTypedArrayElementsInfo(TNode<Map> map);
TNode<JSFunction> GetDefaultConstructor(TNode<Context> context,
TNode<JSTypedArray> exemplar);
diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc
index 78f37c0cf5..18738d2c48 100644
--- a/deps/v8/src/builtins/builtins-weak-refs.cc
+++ b/deps/v8/src/builtins/builtins-weak-refs.cc
@@ -48,14 +48,24 @@ BUILTIN(FinalizationGroupRegister) {
HandleScope scope(isolate);
const char* method_name = "FinalizationGroup.prototype.register";
+ // 1. Let finalizationGroup be the this value.
+ //
+ // 2. If Type(finalizationGroup) is not Object, throw a TypeError
+ // exception.
+ //
+ // 4. If finalizationGroup does not have a [[Cells]] internal slot,
+ // throw a TypeError exception.
CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
Handle<Object> target = args.atOrUndefined(isolate, 1);
+
+ // 3. If Type(target) is not Object, throw a TypeError exception.
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(MessageTemplate::kWeakRefsRegisterTargetMustBeObject));
}
+
Handle<Object> holdings = args.atOrUndefined(isolate, 2);
if (target->SameValue(*holdings)) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -64,15 +74,21 @@ BUILTIN(FinalizationGroupRegister) {
MessageTemplate::kWeakRefsRegisterTargetAndHoldingsMustNotBeSame));
}
- Handle<Object> key = args.atOrUndefined(isolate, 3);
- // TODO(marja, gsathya): Restrictions on "key" (e.g., does it need to be an
- // object).
+ Handle<Object> unregister_token = args.atOrUndefined(isolate, 3);
+ // 5. If Type(unregisterToken) is not Object,
+ // a. If unregisterToken is not undefined, throw a TypeError exception.
+ if (!unregister_token->IsJSReceiver() && !unregister_token->IsUndefined()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kWeakRefsUnregisterTokenMustBeObject,
+ unregister_token));
+ }
// TODO(marja): Realms.
JSFinalizationGroup::Register(finalization_group,
- Handle<JSReceiver>::cast(target), holdings, key,
- isolate);
+ Handle<JSReceiver>::cast(target), holdings,
+ unregister_token, isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -80,25 +96,63 @@ BUILTIN(FinalizationGroupUnregister) {
HandleScope scope(isolate);
const char* method_name = "FinalizationGroup.prototype.unregister";
+ // 1. Let finalizationGroup be the this value.
+ //
+ // 2. If Type(finalizationGroup) is not Object, throw a TypeError
+ // exception.
+ //
+ // 3. If finalizationGroup does not have a [[Cells]] internal slot,
+ // throw a TypeError exception.
CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
- Handle<Object> key = args.atOrUndefined(isolate, 1);
- JSFinalizationGroup::Unregister(finalization_group, key, isolate);
- return ReadOnlyRoots(isolate).undefined_value();
+ Handle<Object> unregister_token = args.atOrUndefined(isolate, 1);
+
+ // 4. If Type(unregisterToken) is not Object, throw a TypeError exception.
+ if (!unregister_token->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kWeakRefsUnregisterTokenMustBeObject,
+ unregister_token));
+ }
+
+ bool success = JSFinalizationGroup::Unregister(
+ finalization_group, Handle<JSReceiver>::cast(unregister_token), isolate);
+
+ return *isolate->factory()->ToBoolean(success);
}
BUILTIN(FinalizationGroupCleanupSome) {
HandleScope scope(isolate);
const char* method_name = "FinalizationGroup.prototype.cleanupSome";
+ // 1. Let finalizationGroup be the this value.
+ //
+ // 2. If Type(finalizationGroup) is not Object, throw a TypeError
+ // exception.
+ //
+ // 3. If finalizationGroup does not have a [[Cells]] internal slot,
+ // throw a TypeError exception.
CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
- // TODO(marja, gsathya): Add missing "cleanup" callback.
+ Handle<Object> callback(finalization_group->cleanup(), isolate);
+ Handle<Object> callback_obj = args.atOrUndefined(isolate, 1);
+
+ // 4. If callback is not undefined and IsCallable(callback) is
+ // false, throw a TypeError exception.
+ if (!callback_obj->IsUndefined(isolate)) {
+ if (!callback_obj->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kWeakRefsCleanupMustBeCallable));
+ }
+ callback = callback_obj;
+ }
// Don't do set_scheduled_for_cleanup(false); we still have the microtask
// scheduled and don't want to schedule another one in case the user never
// executes microtasks.
- JSFinalizationGroup::Cleanup(finalization_group, isolate);
+ JSFinalizationGroup::Cleanup(isolate, finalization_group, callback);
+
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -138,7 +192,7 @@ BUILTIN(WeakRefConstructor) {
}
Handle<JSReceiver> target_receiver =
handle(JSReceiver::cast(*target_object), isolate);
- isolate->heap()->AddKeepDuringJobTarget(target_receiver);
+ isolate->heap()->KeepDuringJob(target_receiver);
// TODO(marja): Realms.
@@ -158,9 +212,9 @@ BUILTIN(WeakRefDeref) {
if (weak_ref->target().IsJSReceiver()) {
Handle<JSReceiver> target =
handle(JSReceiver::cast(weak_ref->target()), isolate);
- // AddKeepDuringJobTarget might allocate and cause a GC, but it won't clear
+ // KeepDuringJob might allocate and cause a GC, but it won't clear
// weak_ref since we hold a Handle to its target.
- isolate->heap()->AddKeepDuringJobTarget(target);
+ isolate->heap()->KeepDuringJob(target);
} else {
DCHECK(weak_ref->target().IsUndefined(isolate));
}
diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq
index eb95a77023..b83906d109 100644
--- a/deps/v8/src/builtins/collections.tq
+++ b/deps/v8/src/builtins/collections.tq
@@ -33,7 +33,7 @@ namespace collections {
}
}
}
- case (receiver: JSReceiver): {
+ case (JSReceiver): {
goto MayHaveSideEffects;
}
case (o: Object): deferred {
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 842e9527ee..62a0cc31c3 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -74,16 +74,17 @@ namespace data_view {
// ES6 section 24.2.4.1 get DataView.prototype.buffer
javascript builtin DataViewPrototypeGetBuffer(
- context: Context, receiver: Object, ...arguments): JSArrayBuffer {
- let dataView: JSDataView =
+ js-implicit context: Context,
+ receiver: Object)(...arguments): JSArrayBuffer {
+ const dataView: JSDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.buffer');
return dataView.buffer;
}
// ES6 section 24.2.4.2 get DataView.prototype.byteLength
javascript builtin DataViewPrototypeGetByteLength(
- context: Context, receiver: Object, ...arguments): Number {
- let dataView: JSDataView = ValidateDataView(
+ js-implicit context: Context, receiver: Object)(...arguments): Number {
+ const dataView: JSDataView = ValidateDataView(
context, receiver, 'get DataView.prototype.byte_length');
if (WasNeutered(dataView)) {
// TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
@@ -95,8 +96,8 @@ namespace data_view {
// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
javascript builtin DataViewPrototypeGetByteOffset(
- context: Context, receiver: Object, ...arguments): Number {
- let dataView: JSDataView = ValidateDataView(
+ js-implicit context: Context, receiver: Object)(...arguments): Number {
+ const dataView: JSDataView = ValidateDataView(
context, receiver, 'get DataView.prototype.byte_offset');
if (WasNeutered(dataView)) {
// TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
@@ -128,7 +129,7 @@ namespace data_view {
macro LoadDataView16(
buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
signed: constexpr bool): Number {
- let dataPointer: RawPtr = buffer.backing_store;
+ const dataPointer: RawPtr = buffer.backing_store;
let b0: int32;
let b1: int32;
@@ -155,12 +156,12 @@ namespace data_view {
macro LoadDataView32(
buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
kind: constexpr ElementsKind): Number {
- let dataPointer: RawPtr = buffer.backing_store;
+ const dataPointer: RawPtr = buffer.backing_store;
- let b0: uint32 = LoadUint8(dataPointer, offset);
- let b1: uint32 = LoadUint8(dataPointer, offset + 1);
- let b2: uint32 = LoadUint8(dataPointer, offset + 2);
- let b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ const b0: uint32 = LoadUint8(dataPointer, offset);
+ const b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ const b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ const b3: uint32 = LoadUint8(dataPointer, offset + 3);
let result: uint32;
if (requestedLittleEndian) {
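For orientation (not part of the patch): a standalone C++ sketch of how the four bytes loaded above are presumably combined according to requestedLittleEndian, following DataView's specified behaviour; the helper name and signature here are made up.

#include <cassert>
#include <cstddef>
#include <cstdint>

uint32_t LoadDataView32(const uint8_t* data, size_t offset,
                        bool requested_little_endian) {
  uint32_t b0 = data[offset + 0];
  uint32_t b1 = data[offset + 1];
  uint32_t b2 = data[offset + 2];
  uint32_t b3 = data[offset + 3];
  // Little-endian: b0 is the least significant byte; big-endian: the most.
  return requested_little_endian
             ? (b3 << 24) | (b2 << 16) | (b1 << 8) | b0
             : (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
}

int main() {
  const uint8_t bytes[] = {0x12, 0x34, 0x56, 0x78};
  assert(LoadDataView32(bytes, 0, /*requested_little_endian=*/true) == 0x78563412u);
  assert(LoadDataView32(bytes, 0, /*requested_little_endian=*/false) == 0x12345678u);
  return 0;
}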
@@ -174,7 +175,7 @@ namespace data_view {
} else if constexpr (kind == UINT32_ELEMENTS) {
return Convert<Number>(result);
} else if constexpr (kind == FLOAT32_ELEMENTS) {
- let floatRes: float64 = Convert<float64>(BitcastInt32ToFloat32(result));
+ const floatRes: float64 = Convert<float64>(BitcastInt32ToFloat32(result));
return Convert<Number>(floatRes);
} else {
unreachable;
@@ -184,16 +185,16 @@ namespace data_view {
macro LoadDataViewFloat64(
buffer: JSArrayBuffer, offset: uintptr,
requestedLittleEndian: bool): Number {
- let dataPointer: RawPtr = buffer.backing_store;
-
- let b0: uint32 = LoadUint8(dataPointer, offset);
- let b1: uint32 = LoadUint8(dataPointer, offset + 1);
- let b2: uint32 = LoadUint8(dataPointer, offset + 2);
- let b3: uint32 = LoadUint8(dataPointer, offset + 3);
- let b4: uint32 = LoadUint8(dataPointer, offset + 4);
- let b5: uint32 = LoadUint8(dataPointer, offset + 5);
- let b6: uint32 = LoadUint8(dataPointer, offset + 6);
- let b7: uint32 = LoadUint8(dataPointer, offset + 7);
+ const dataPointer: RawPtr = buffer.backing_store;
+
+ const b0: uint32 = LoadUint8(dataPointer, offset);
+ const b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ const b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ const b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ const b4: uint32 = LoadUint8(dataPointer, offset + 4);
+ const b5: uint32 = LoadUint8(dataPointer, offset + 5);
+ const b6: uint32 = LoadUint8(dataPointer, offset + 6);
+ const b7: uint32 = LoadUint8(dataPointer, offset + 7);
let lowWord: uint32;
let highWord: uint32;
@@ -212,74 +213,49 @@ namespace data_view {
return Convert<Number>(result);
}
- extern macro AllocateBigInt(intptr): BigInt;
- extern macro StoreBigIntBitfield(BigInt, uint32): void;
- extern macro StoreBigIntDigit(BigInt, constexpr int31, uintptr): void;
- extern macro DataViewBuiltinsAssembler::DataViewEncodeBigIntBits(
- constexpr bool, constexpr int31): uint32;
-
- const kPositiveBigInt: constexpr bool = false;
- const kNegativeBigInt: constexpr bool = true;
const kZeroDigitBigInt: constexpr int31 = 0;
const kOneDigitBigInt: constexpr int31 = 1;
const kTwoDigitBigInt: constexpr int31 = 2;
- macro CreateEmptyBigInt(isPositive: bool, length: constexpr int31): BigInt {
- // Allocate a BigInt with the desired length (number of digits).
- let result: BigInt = AllocateBigInt(length);
-
- // Write the desired sign and length to the BigInt bitfield.
- if (isPositive) {
- StoreBigIntBitfield(
- result, DataViewEncodeBigIntBits(kPositiveBigInt, length));
- } else {
- StoreBigIntBitfield(
- result, DataViewEncodeBigIntBits(kNegativeBigInt, length));
- }
-
- return result;
- }
-
// Create a BigInt on a 64-bit architecture from two 32-bit values.
- macro MakeBigIntOn64Bit(
+ macro MakeBigIntOn64Bit(implicit context: Context)(
lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
// 0n is represented by a zero-length BigInt.
if (lowWord == 0 && highWord == 0) {
- return AllocateBigInt(kZeroDigitBigInt);
+ return Convert<BigInt>(bigint::AllocateBigInt(kZeroDigitBigInt));
}
- let isPositive: bool = true;
- let highPart: intptr = Signed(Convert<uintptr>(highWord));
- let lowPart: intptr = Signed(Convert<uintptr>(lowWord));
+ let sign: uint32 = bigint::kPositiveSign;
+ const highPart: intptr = Signed(Convert<uintptr>(highWord));
+ const lowPart: intptr = Signed(Convert<uintptr>(lowWord));
let rawValue: intptr = (highPart << 32) + lowPart;
if constexpr (signed) {
if (rawValue < 0) {
- isPositive = false;
+ sign = bigint::kNegativeSign;
// We have to store the absolute value of rawValue in the digit.
rawValue = 0 - rawValue;
}
}
// Allocate the BigInt and store the absolute value.
- let result: BigInt = CreateEmptyBigInt(isPositive, kOneDigitBigInt);
-
- StoreBigIntDigit(result, 0, Unsigned(rawValue));
-
- return result;
+ const result: MutableBigInt =
+ bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt);
+ bigint::StoreBigIntDigit(result, 0, Unsigned(rawValue));
+ return Convert<BigInt>(result);
}
// Create a BigInt on a 32-bit architecture from two 32-bit values.
- macro MakeBigIntOn32Bit(
+ macro MakeBigIntOn32Bit(implicit context: Context)(
lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
// 0n is represented by a zero-length BigInt.
if (lowWord == 0 && highWord == 0) {
- return AllocateBigInt(kZeroDigitBigInt);
+ return Convert<BigInt>(bigint::AllocateBigInt(kZeroDigitBigInt));
}
// On a 32-bit platform, we might need 1 or 2 digits to store the number.
let needTwoDigits: bool = false;
- let isPositive: bool = true;
+ let sign: uint32 = bigint::kPositiveSign;
// We need to do some math on lowWord and highWord,
// so Convert them to int32.
@@ -293,7 +269,7 @@ namespace data_view {
if constexpr (signed) {
// If highPart < 0, the number is always negative.
if (highPart < 0) {
- isPositive = false;
+ sign = bigint::kNegativeSign;
// We have to compute the absolute value by hand.
// There will be a negative carry from the low word
@@ -322,25 +298,23 @@ namespace data_view {
}
// Allocate the BigInt with the right sign and length.
- let result: BigInt;
+ let result: MutableBigInt;
if (needTwoDigits) {
- result = CreateEmptyBigInt(isPositive, kTwoDigitBigInt);
+ result = bigint::AllocateEmptyBigInt(sign, kTwoDigitBigInt);
} else {
- result = CreateEmptyBigInt(isPositive, kOneDigitBigInt);
+ result = bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt);
}
// Finally, write the digit(s) to the BigInt.
- StoreBigIntDigit(result, 0, Unsigned(Convert<intptr>(lowPart)));
-
+ bigint::StoreBigIntDigit(result, 0, Unsigned(Convert<intptr>(lowPart)));
if (needTwoDigits) {
- StoreBigIntDigit(result, 1, Unsigned(Convert<intptr>(highPart)));
+ bigint::StoreBigIntDigit(result, 1, Unsigned(Convert<intptr>(highPart)));
}
-
- return result;
+ return Convert<BigInt>(result);
}
- macro MakeBigInt(lowWord: uint32, highWord: uint32, signed: constexpr bool):
- BigInt {
+ macro MakeBigInt(implicit context: Context)(
+ lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
// A BigInt digit has the platform word size, so we only need one digit
// on 64-bit platforms but may need two on 32-bit.
if constexpr (Is64()) {
@@ -350,19 +324,19 @@ namespace data_view {
}
}
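A standalone C++ sketch (illustration only; the helper name and out-parameters are made up) of the sign/magnitude split MakeBigIntOn64Bit performs above: read the two 32-bit halves as one signed 64-bit value, record the sign, and keep the absolute value as the single BigInt digit. The unsigned negation stands in for the Torque `0 - rawValue` step so the sketch stays well-defined in plain C++.

#include <cassert>
#include <cstdint>

void SplitToSignAndDigit(uint32_t low_word, uint32_t high_word, bool is_signed,
                         bool* negative, uint64_t* digit) {
  uint64_t bits = (static_cast<uint64_t>(high_word) << 32) | low_word;
  // When interpreted as signed, the top bit is the sign.
  *negative = is_signed && (bits >> 63) != 0;
  // The digit stores the magnitude; unsigned negation is the two's-complement
  // absolute value.
  *digit = *negative ? (~bits + 1) : bits;
}

int main() {
  bool negative = false;
  uint64_t digit = 0;
  SplitToSignAndDigit(0xFFFFFFFFu, 0xFFFFFFFFu, /*is_signed=*/true,
                      &negative, &digit);
  assert(negative && digit == 1);  // all-ones reads as -1 when signed
  SplitToSignAndDigit(0xFFFFFFFFu, 0xFFFFFFFFu, /*is_signed=*/false,
                      &negative, &digit);
  assert(!negative && digit == 0xFFFFFFFFFFFFFFFFull);
  return 0;
}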
- macro LoadDataViewBigInt(
+ macro LoadDataViewBigInt(implicit context: Context)(
buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
signed: constexpr bool): BigInt {
- let dataPointer: RawPtr = buffer.backing_store;
-
- let b0: uint32 = LoadUint8(dataPointer, offset);
- let b1: uint32 = LoadUint8(dataPointer, offset + 1);
- let b2: uint32 = LoadUint8(dataPointer, offset + 2);
- let b3: uint32 = LoadUint8(dataPointer, offset + 3);
- let b4: uint32 = LoadUint8(dataPointer, offset + 4);
- let b5: uint32 = LoadUint8(dataPointer, offset + 5);
- let b6: uint32 = LoadUint8(dataPointer, offset + 6);
- let b7: uint32 = LoadUint8(dataPointer, offset + 7);
+ const dataPointer: RawPtr = buffer.backing_store;
+
+ const b0: uint32 = LoadUint8(dataPointer, offset);
+ const b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ const b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ const b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ const b4: uint32 = LoadUint8(dataPointer, offset + 4);
+ const b5: uint32 = LoadUint8(dataPointer, offset + 5);
+ const b6: uint32 = LoadUint8(dataPointer, offset + 6);
+ const b7: uint32 = LoadUint8(dataPointer, offset + 7);
let lowWord: uint32;
let highWord: uint32;
@@ -385,7 +359,7 @@ namespace data_view {
transitioning macro DataViewGet(
context: Context, receiver: Object, offset: Object,
requestedLittleEndian: Object, kind: constexpr ElementsKind): Numeric {
- let dataView: JSDataView =
+ const dataView: JSDataView =
ValidateDataView(context, receiver, MakeDataViewGetterNameString(kind));
let getIndex: Number;
@@ -396,25 +370,25 @@ namespace data_view {
ThrowRangeError(kInvalidDataViewAccessorOffset);
}
- let littleEndian: bool = ToBoolean(requestedLittleEndian);
- let buffer: JSArrayBuffer = dataView.buffer;
+ const littleEndian: bool = ToBoolean(requestedLittleEndian);
+ const buffer: JSArrayBuffer = dataView.buffer;
if (IsDetachedBuffer(buffer)) {
ThrowTypeError(kDetachedOperation, MakeDataViewGetterNameString(kind));
}
- let getIndexFloat: float64 = Convert<float64>(getIndex);
- let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
+ const getIndexFloat: float64 = Convert<float64>(getIndex);
+ const getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
- let viewOffsetWord: uintptr = dataView.byte_offset;
- let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
- let elementSizeFloat: float64 = DataViewElementSize(kind);
+ const viewOffsetWord: uintptr = dataView.byte_offset;
+ const viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
+ const elementSizeFloat: float64 = DataViewElementSize(kind);
if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
ThrowRangeError(kInvalidDataViewAccessorOffset);
}
- let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
+ const bufferIndex: uintptr = getIndexWord + viewOffsetWord;
if constexpr (kind == UINT8_ELEMENTS) {
return LoadDataView8(buffer, bufferIndex, false);
@@ -442,84 +416,84 @@ namespace data_view {
}
transitioning javascript builtin DataViewPrototypeGetUint8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
return DataViewGet(context, receiver, offset, Undefined, UINT8_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetInt8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
return DataViewGet(context, receiver, offset, Undefined, INT8_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetUint16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, UINT16_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetInt16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, INT16_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetUint32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, UINT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetInt32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, INT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetFloat32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, FLOAT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetFloat64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, FLOAT64_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetBigUint64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, BIGUINT64_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetBigInt64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, BIGINT64_ELEMENTS);
@@ -539,10 +513,10 @@ namespace data_view {
macro StoreDataView16(
buffer: JSArrayBuffer, offset: uintptr, value: uint32,
requestedLittleEndian: bool) {
- let dataPointer: RawPtr = buffer.backing_store;
+ const dataPointer: RawPtr = buffer.backing_store;
- let b0: uint32 = value & 0xFF;
- let b1: uint32 = (value >>> 8) & 0xFF;
+ const b0: uint32 = value & 0xFF;
+ const b1: uint32 = (value >>> 8) & 0xFF;
if (requestedLittleEndian) {
StoreWord8(dataPointer, offset, b0);
@@ -556,12 +530,12 @@ namespace data_view {
macro StoreDataView32(
buffer: JSArrayBuffer, offset: uintptr, value: uint32,
requestedLittleEndian: bool) {
- let dataPointer: RawPtr = buffer.backing_store;
+ const dataPointer: RawPtr = buffer.backing_store;
- let b0: uint32 = value & 0xFF;
- let b1: uint32 = (value >>> 8) & 0xFF;
- let b2: uint32 = (value >>> 16) & 0xFF;
- let b3: uint32 = value >>> 24; // We don't need to mask here.
+ const b0: uint32 = value & 0xFF;
+ const b1: uint32 = (value >>> 8) & 0xFF;
+ const b2: uint32 = (value >>> 16) & 0xFF;
+ const b3: uint32 = value >>> 24; // We don't need to mask here.
if (requestedLittleEndian) {
StoreWord8(dataPointer, offset, b0);
@@ -579,17 +553,17 @@ namespace data_view {
macro StoreDataView64(
buffer: JSArrayBuffer, offset: uintptr, lowWord: uint32, highWord: uint32,
requestedLittleEndian: bool) {
- let dataPointer: RawPtr = buffer.backing_store;
+ const dataPointer: RawPtr = buffer.backing_store;
- let b0: uint32 = lowWord & 0xFF;
- let b1: uint32 = (lowWord >>> 8) & 0xFF;
- let b2: uint32 = (lowWord >>> 16) & 0xFF;
- let b3: uint32 = lowWord >>> 24;
+ const b0: uint32 = lowWord & 0xFF;
+ const b1: uint32 = (lowWord >>> 8) & 0xFF;
+ const b2: uint32 = (lowWord >>> 16) & 0xFF;
+ const b3: uint32 = lowWord >>> 24;
- let b4: uint32 = highWord & 0xFF;
- let b5: uint32 = (highWord >>> 8) & 0xFF;
- let b6: uint32 = (highWord >>> 16) & 0xFF;
- let b7: uint32 = highWord >>> 24;
+ const b4: uint32 = highWord & 0xFF;
+ const b5: uint32 = (highWord >>> 8) & 0xFF;
+ const b6: uint32 = (highWord >>> 16) & 0xFF;
+ const b7: uint32 = highWord >>> 24;
if (requestedLittleEndian) {
StoreWord8(dataPointer, offset, b0);
@@ -612,11 +586,10 @@ namespace data_view {
}
}
- extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength(BigInt):
- uint32;
- extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigInt):
+ extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength(
+ BigIntBase): uint32;
+ extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigIntBase):
uint32;
- extern macro LoadBigIntDigit(BigInt, constexpr int31): uintptr;
  // We might get a BigInt here that is bigger than 64 bits, but we're only
// interested in the 64 lowest ones. This means the lowest BigInt digit
@@ -624,8 +597,8 @@ namespace data_view {
macro StoreDataViewBigInt(
buffer: JSArrayBuffer, offset: uintptr, bigIntValue: BigInt,
requestedLittleEndian: bool) {
- let length: uint32 = DataViewDecodeBigIntLength(bigIntValue);
- let sign: uint32 = DataViewDecodeBigIntSign(bigIntValue);
+ const length: uint32 = DataViewDecodeBigIntLength(bigIntValue);
+ const sign: uint32 = DataViewDecodeBigIntSign(bigIntValue);
// The 32-bit words that will hold the BigInt's value in
// two's complement representation.
@@ -636,13 +609,13 @@ namespace data_view {
if (length != 0) {
if constexpr (Is64()) {
// There is always exactly 1 BigInt digit to load in this case.
- let value: uintptr = LoadBigIntDigit(bigIntValue, 0);
+ const value: uintptr = bigint::LoadBigIntDigit(bigIntValue, 0);
lowWord = Convert<uint32>(value); // Truncates value to 32 bits.
highWord = Convert<uint32>(value >>> 32);
} else { // There might be either 1 or 2 BigInt digits we need to load.
- lowWord = Convert<uint32>(LoadBigIntDigit(bigIntValue, 0));
+ lowWord = Convert<uint32>(bigint::LoadBigIntDigit(bigIntValue, 0));
if (length >= 2) { // Only load the second digit if there is one.
- highWord = Convert<uint32>(LoadBigIntDigit(bigIntValue, 1));
+ highWord = Convert<uint32>(bigint::LoadBigIntDigit(bigIntValue, 1));
}
}
}
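
StoreDataViewBigInt above keeps only the lowest 64 bits of the incoming BigInt and splits them into two 32-bit words. A minimal TypeScript sketch of that truncation, using only standard BigInt operations (splitLow64 is an illustrative name, not part of these sources):

  // Keep only the lowest 64 bits of a BigInt and split them into two
  // unsigned 32-bit words, mirroring lowWord/highWord above.
  function splitLow64(value: bigint): { lowWord: number; highWord: number } {
    const low64 = BigInt.asUintN(64, value);      // truncate to 64 bits (two's complement)
    const lowWord = Number(low64 & 0xffffffffn);  // bits 0..31
    const highWord = Number(low64 >> 32n);        // bits 32..63
    return { lowWord, highWord };
  }

  // A BigInt wider than 64 bits loses everything above bit 63.
  console.log(splitLow64((1n << 64n) + 0x100000005n));  // { lowWord: 5, highWord: 1 }
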
@@ -661,7 +634,7 @@ namespace data_view {
transitioning macro DataViewSet(
context: Context, receiver: Object, offset: Object, value: Object,
requestedLittleEndian: Object, kind: constexpr ElementsKind): Object {
- let dataView: JSDataView =
+ const dataView: JSDataView =
ValidateDataView(context, receiver, MakeDataViewSetterNameString(kind));
let getIndex: Number;
@@ -672,52 +645,52 @@ namespace data_view {
ThrowRangeError(kInvalidDataViewAccessorOffset);
}
- let littleEndian: bool = ToBoolean(requestedLittleEndian);
- let buffer: JSArrayBuffer = dataView.buffer;
+ const littleEndian: bool = ToBoolean(requestedLittleEndian);
+ const buffer: JSArrayBuffer = dataView.buffer;
// According to ES6 section 24.2.1.2 SetViewValue, we must perform
// the conversion before doing the bounds check.
if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) {
- let bigIntValue: BigInt = ToBigInt(context, value);
+ const bigIntValue: BigInt = ToBigInt(context, value);
if (IsDetachedBuffer(buffer)) {
ThrowTypeError(kDetachedOperation, MakeDataViewSetterNameString(kind));
}
- let getIndexFloat: float64 = Convert<float64>(getIndex);
- let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
+ const getIndexFloat: float64 = Convert<float64>(getIndex);
+ const getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
- let viewOffsetWord: uintptr = dataView.byte_offset;
- let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
- let elementSizeFloat: float64 = DataViewElementSize(kind);
+ const viewOffsetWord: uintptr = dataView.byte_offset;
+ const viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
+ const elementSizeFloat: float64 = DataViewElementSize(kind);
if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
ThrowRangeError(kInvalidDataViewAccessorOffset);
}
- let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
+ const bufferIndex: uintptr = getIndexWord + viewOffsetWord;
StoreDataViewBigInt(buffer, bufferIndex, bigIntValue, littleEndian);
} else {
- let numValue: Number = ToNumber(context, value);
+ const numValue: Number = ToNumber(context, value);
if (IsDetachedBuffer(buffer)) {
ThrowTypeError(kDetachedOperation, MakeDataViewSetterNameString(kind));
}
- let getIndexFloat: float64 = Convert<float64>(getIndex);
- let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
+ const getIndexFloat: float64 = Convert<float64>(getIndex);
+ const getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
- let viewOffsetWord: uintptr = dataView.byte_offset;
- let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
- let elementSizeFloat: float64 = DataViewElementSize(kind);
+ const viewOffsetWord: uintptr = dataView.byte_offset;
+ const viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
+ const elementSizeFloat: float64 = DataViewElementSize(kind);
if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
ThrowRangeError(kInvalidDataViewAccessorOffset);
}
- let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
+ const bufferIndex: uintptr = getIndexWord + viewOffsetWord;
- let doubleValue: float64 = ChangeNumberToFloat64(numValue);
+ const doubleValue: float64 = ChangeNumberToFloat64(numValue);
if constexpr (kind == UINT8_ELEMENTS || kind == INT8_ELEMENTS) {
StoreDataView8(
@@ -731,13 +704,13 @@ namespace data_view {
buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue),
littleEndian);
} else if constexpr (kind == FLOAT32_ELEMENTS) {
- let floatValue: float32 = TruncateFloat64ToFloat32(doubleValue);
+ const floatValue: float32 = TruncateFloat64ToFloat32(doubleValue);
StoreDataView32(
buffer, bufferIndex, BitcastFloat32ToInt32(floatValue),
littleEndian);
} else if constexpr (kind == FLOAT64_ELEMENTS) {
- let lowWord: uint32 = Float64ExtractLowWord32(doubleValue);
- let highWord: uint32 = Float64ExtractHighWord32(doubleValue);
+ const lowWord: uint32 = Float64ExtractLowWord32(doubleValue);
+ const highWord: uint32 = Float64ExtractHighWord32(doubleValue);
StoreDataView64(buffer, bufferIndex, lowWord, highWord, littleEndian);
}
}
@@ -745,96 +718,96 @@ namespace data_view {
}
transitioning javascript builtin DataViewPrototypeSetUint8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
return DataViewSet(
context, receiver, offset, value, Undefined, UINT8_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetInt8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
return DataViewSet(
context, receiver, offset, value, Undefined, INT8_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetUint16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, UINT16_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetInt16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, INT16_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetUint32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, UINT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetInt32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, INT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetFloat32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, FLOAT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetFloat64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, FLOAT64_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetBigUint64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, BIGUINT64_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetBigInt64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, BIGINT64_ELEMENTS);
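
The DataViewSet macro above follows ES6 24.2.1.2 SetViewValue: the value is converted with ToNumber or ToBigInt before the detach and bounds checks run, and the littleEndian argument only selects the byte order of the store. A small TypeScript sketch of that observable behaviour using the plain DataView API (no V8 internals involved):

  const buffer = new ArrayBuffer(8);
  const view = new DataView(buffer);

  // The littleEndian flag only controls the byte order of the store.
  view.setUint32(0, 0x11223344, true);   // little-endian
  view.setUint32(4, 0x11223344, false);  // big-endian
  console.log(new Uint8Array(buffer));   // 44 33 22 11 11 22 33 44

  // The value is converted before the offset is range-checked, so a
  // side-effecting valueOf still runs for an out-of-bounds offset.
  let converted = false;
  try {
    view.setUint32(100, { valueOf() { converted = true; return 1; } } as any);
  } catch (e) {
    console.log(converted, e instanceof RangeError);  // true true
  }
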
diff --git a/deps/v8/src/builtins/extras-utils.tq b/deps/v8/src/builtins/extras-utils.tq
index 2b9b79739e..3675fda191 100644
--- a/deps/v8/src/builtins/extras-utils.tq
+++ b/deps/v8/src/builtins/extras-utils.tq
@@ -8,17 +8,18 @@ namespace extras_utils {
extern runtime PromiseStatus(Context, Object): Smi;
javascript builtin ExtrasUtilsCreatePrivateSymbol(
- context: Context, receiver: Object, ...arguments): HeapObject {
+ js-implicit context: Context,
+ receiver: Object)(...arguments): HeapObject {
return CreatePrivateSymbol(context, arguments[0]);
}
javascript builtin ExtrasUtilsMarkPromiseAsHandled(
- context: Context, receiver: Object, ...arguments): Undefined {
+ js-implicit context: Context, receiver: Object)(...arguments): Undefined {
return PromiseMarkAsHandled(context, arguments[0]);
}
javascript builtin ExtrasUtilsPromiseState(
- context: Context, receiver: Object, ...arguments): Smi {
+ js-implicit context: Context, receiver: Object)(...arguments): Smi {
return PromiseStatus(context, arguments[0]);
}
}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 0d80c681fb..995be77f75 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -1023,10 +1023,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
  // 16-bit. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset),
+ BytecodeArray::kOsrNestingLevelOffset),
Immediate(0));
// Push bytecode array.
@@ -1534,6 +1534,15 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kFixedFrameSize),
eax);
}
+
+ // Replace the builtin index Smi on the stack with the start address of the
+ // builtin loaded from the builtins table. The ret below will return to this
+ // address.
+ int offset_to_builtin_index = allocatable_register_count * kSystemPointerSize;
+ __ mov(eax, Operand(esp, offset_to_builtin_index));
+ __ LoadEntryFromBuiltinIndex(eax);
+ __ mov(Operand(esp, offset_to_builtin_index), eax);
+
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
__ pop(Register::from_code(code));
@@ -1549,7 +1558,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kSystemPointerSize;
__ pop(Operand(esp, offsetToPC));
__ Drop(offsetToPC / kSystemPointerSize);
- __ add(Operand(esp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ ret(0);
}
} // namespace
@@ -3012,23 +3020,28 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ mov(esi, __ ExternalReferenceAsOperand(next_address, esi));
__ mov(edi, __ ExternalReferenceAsOperand(limit_address, edi));
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ Move(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
__ cmpb(Operand(eax, 0), Immediate(0));
- __ j(zero, &profiler_disabled);
+ __ j(not_zero, &profiler_enabled);
+ __ Move(eax, Immediate(ExternalReference::address_of_runtime_stats_flag()));
+ __ cmp(Operand(eax, 0), Immediate(0));
+ __ j(not_zero, &profiler_enabled);
+ {
+ // Call the api function directly.
+ __ mov(eax, function_address);
+ __ jmp(&end_profiler_check);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual getter function.
+ __ mov(thunk_last_arg, function_address);
+ __ Move(eax, Immediate(thunk_ref));
+ }
+ __ bind(&end_profiler_check);
- // Additional parameter is the address of the actual getter function.
- __ mov(thunk_last_arg, function_address);
// Call the api function.
- __ Move(eax, Immediate(thunk_ref));
__ call(eax);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- // Call the api function.
- __ call(function_address);
- __ bind(&end_profiler_check);
Label prologue;
// Load the value from ReturnValue
@@ -3080,6 +3093,9 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ CompareRoot(map, RootIndex::kHeapNumberMap);
__ j(equal, &ok, Label::kNear);
+ __ CompareRoot(map, RootIndex::kBigIntMap);
+ __ j(equal, &ok, Label::kNear);
+
__ CompareRoot(return_value, RootIndex::kUndefinedValue);
__ j(equal, &ok, Label::kNear);
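
The CallApiFunctionAndReturn hunk above inverts the old profiler check: the API function is now called directly unless either profiling or the runtime-call-stats flag is active, in which case the call goes through the profiling thunk (and BigInt becomes an accepted API return value). A rough TypeScript sketch of just that dispatch decision; the parameter names are illustrative stand-ins for the external references the assembly reads, not real V8 symbols:

  type ApiFunction = () => unknown;

  // isProfiling / runtimeStatsEnabled stand in for is_profiling_address and
  // address_of_runtime_stats_flag.
  function callApiFunction(
      fn: ApiFunction, profilingThunk: (fn: ApiFunction) => unknown,
      isProfiling: boolean, runtimeStatsEnabled: boolean): unknown {
    if (!isProfiling && !runtimeStatsEnabled) {
      return fn();              // fast path: call the API function directly
    }
    return profilingThunk(fn);  // otherwise route the call through the thunk
  }

  console.log(callApiFunction(() => 42, f => f(), false, false));  // 42
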
diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq
index 4e75c6d837..d96fa924ab 100644
--- a/deps/v8/src/builtins/internal-coverage.tq
+++ b/deps/v8/src/builtins/internal-coverage.tq
@@ -28,6 +28,8 @@ namespace internal_coverage {
return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
}
+ @export // Silence unused warning on release builds. SlotCount is only used
+ // in an assert. TODO(szuend): Remove once macros and asserts work.
macro SlotCount(coverageInfo: CoverageInfo): Smi {
assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below.
assert(kFirstSlotIndex == (coverageInfo.length & kSlotIndexCountMask));
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 5c9439dfc7..b770f1b652 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -20,16 +20,16 @@ namespace iterator {
implicit context: Context)(Object): IteratorRecord;
extern macro IteratorBuiltinsAssembler::IteratorStep(
- implicit context: Context)(IteratorRecord): Object
+ implicit context: Context)(IteratorRecord): JSReceiver
labels Done;
extern macro IteratorBuiltinsAssembler::IteratorStep(
- implicit context: Context)(IteratorRecord, Map): Object
+ implicit context: Context)(IteratorRecord, Map): JSReceiver
labels Done;
extern macro IteratorBuiltinsAssembler::IteratorValue(
- implicit context: Context)(Object): Object;
+ implicit context: Context)(JSReceiver): Object;
extern macro IteratorBuiltinsAssembler::IteratorValue(
- implicit context: Context)(Object, Map): Object;
+ implicit context: Context)(JSReceiver, Map): Object;
extern macro IteratorBuiltinsAssembler::IteratorCloseOnException(
implicit context: Context)(IteratorRecord, Object): never;
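
The signature changes above tighten IteratorStep and IteratorValue to work on JSReceiver, reflecting that a step result must be an object. A rough TypeScript sketch of that contract (iteratorStep and iteratorValue are hypothetical helpers, not the Torque macros themselves):

  interface IteratorRecordSketch<T> { iterator: Iterator<T>; }

  // Returns the step result (always an object) or null once the iterator is
  // done - roughly what the Done label models in the signatures above.
  function iteratorStep<T>(record: IteratorRecordSketch<T>): IteratorResult<T>|null {
    const result = record.iterator.next();
    if (typeof result !== 'object' || result === null) {
      throw new TypeError('Iterator result is not an object');
    }
    return result.done ? null : result;
  }

  // IteratorValue only ever sees the object produced by iteratorStep.
  function iteratorValue<T>(result: IteratorResult<T>): T {
    return result.value as T;
  }
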
diff --git a/deps/v8/src/builtins/math.tq b/deps/v8/src/builtins/math.tq
index 84dd1261fa..df43b30efc 100644
--- a/deps/v8/src/builtins/math.tq
+++ b/deps/v8/src/builtins/math.tq
@@ -7,7 +7,7 @@ namespace math {
extern macro Float64Acos(float64): float64;
transitioning javascript builtin
- MathAcos(context: Context, receiver: Object, x: Object): Number {
+ MathAcos(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Acos(value));
}
@@ -16,7 +16,7 @@ namespace math {
extern macro Float64Acosh(float64): float64;
transitioning javascript builtin
- MathAcosh(context: Context, receiver: Object, x: Object): Number {
+ MathAcosh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Acosh(value));
}
@@ -25,7 +25,7 @@ namespace math {
extern macro Float64Asin(float64): float64;
transitioning javascript builtin
- MathAsin(context: Context, receiver: Object, x: Object): Number {
+ MathAsin(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Asin(value));
}
@@ -34,7 +34,7 @@ namespace math {
extern macro Float64Asinh(float64): float64;
transitioning javascript builtin
- MathAsinh(context: Context, receiver: Object, x: Object): Number {
+ MathAsinh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Asinh(value));
}
@@ -43,7 +43,7 @@ namespace math {
extern macro Float64Atan(float64): float64;
transitioning javascript builtin
- MathAtan(context: Context, receiver: Object, x: Object): Number {
+ MathAtan(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Atan(value));
}
@@ -52,7 +52,7 @@ namespace math {
extern macro Float64Atan2(float64, float64): float64;
transitioning javascript builtin
- MathAtan2(context: Context, receiver: Object, y: Object, x: Object): Number {
+ MathAtan2(context: Context, _receiver: Object, y: Object, x: Object): Number {
const yValue = Convert<float64>(ToNumber_Inline(context, y));
const xValue = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Atan2(yValue, xValue));
@@ -62,7 +62,7 @@ namespace math {
extern macro Float64Atanh(float64): float64;
transitioning javascript builtin
- MathAtanh(context: Context, receiver: Object, x: Object): Number {
+ MathAtanh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Atanh(value));
}
@@ -71,7 +71,7 @@ namespace math {
extern macro Float64Cbrt(float64): float64;
transitioning javascript builtin
- MathCbrt(context: Context, receiver: Object, x: Object): Number {
+ MathCbrt(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Cbrt(value));
}
@@ -80,7 +80,7 @@ namespace math {
extern macro Word32Clz(int32): int32;
transitioning javascript builtin
- MathClz32(context: Context, receiver: Object, x: Object): Number {
+ MathClz32(context: Context, _receiver: Object, x: Object): Number {
const num = ToNumber_Inline(context, x);
let value: int32;
@@ -100,7 +100,7 @@ namespace math {
extern macro Float64Cos(float64): float64;
transitioning javascript builtin
- MathCos(context: Context, receiver: Object, x: Object): Number {
+ MathCos(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Cos(value));
}
@@ -109,7 +109,7 @@ namespace math {
extern macro Float64Cosh(float64): float64;
transitioning javascript builtin
- MathCosh(context: Context, receiver: Object, x: Object): Number {
+ MathCosh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Cosh(value));
}
@@ -118,7 +118,7 @@ namespace math {
extern macro Float64Exp(float64): float64;
transitioning javascript builtin
- MathExp(context: Context, receiver: Object, x: Object): Number {
+ MathExp(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Exp(value));
}
@@ -127,14 +127,14 @@ namespace math {
extern macro Float64Expm1(float64): float64;
transitioning javascript builtin
- MathExpm1(context: Context, receiver: Object, x: Object): Number {
+ MathExpm1(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Expm1(value));
}
// ES6 #sec-math.fround
transitioning javascript builtin
- MathFround(context: Context, receiver: Object, x: Object): Number {
+ MathFround(context: Context, _receiver: Object, x: Object): Number {
const x32 = Convert<float32>(ToNumber_Inline(context, x));
const x64 = Convert<float64>(x32);
return Convert<Number>(x64);
@@ -144,7 +144,7 @@ namespace math {
extern macro Float64Log(float64): float64;
transitioning javascript builtin
- MathLog(context: Context, receiver: Object, x: Object): Number {
+ MathLog(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Log(value));
}
@@ -153,7 +153,7 @@ namespace math {
extern macro Float64Log1p(float64): float64;
transitioning javascript builtin
- MathLog1p(context: Context, receiver: Object, x: Object): Number {
+ MathLog1p(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Log1p(value));
}
@@ -162,7 +162,7 @@ namespace math {
extern macro Float64Log10(float64): float64;
transitioning javascript builtin
- MathLog10(context: Context, receiver: Object, x: Object): Number {
+ MathLog10(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Log10(value));
}
@@ -171,7 +171,7 @@ namespace math {
extern macro Float64Log2(float64): float64;
transitioning javascript builtin
- MathLog2(context: Context, receiver: Object, x: Object): Number {
+ MathLog2(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Log2(value));
}
@@ -180,14 +180,14 @@ namespace math {
extern macro Float64Sin(float64): float64;
transitioning javascript builtin
- MathSin(context: Context, receiver: Object, x: Object): Number {
+ MathSin(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Sin(value));
}
// ES6 #sec-math.sign
transitioning javascript builtin
- MathSign(context: Context, receiver: Object, x: Object): Number {
+ MathSign(context: Context, _receiver: Object, x: Object): Number {
const num = ToNumber_Inline(context, x);
const value = Convert<float64>(num);
@@ -204,7 +204,7 @@ namespace math {
extern macro Float64Sinh(float64): float64;
transitioning javascript builtin
- MathSinh(context: Context, receiver: Object, x: Object): Number {
+ MathSinh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Sinh(value));
}
@@ -213,7 +213,7 @@ namespace math {
extern macro Float64Sqrt(float64): float64;
transitioning javascript builtin
- MathSqrt(context: Context, receiver: Object, x: Object): Number {
+ MathSqrt(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Sqrt(value));
}
@@ -222,7 +222,7 @@ namespace math {
extern macro Float64Tan(float64): float64;
transitioning javascript builtin
- MathTan(context: Context, receiver: Object, x: Object): Number {
+ MathTan(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Tan(value));
}
@@ -231,7 +231,7 @@ namespace math {
extern macro Float64Tanh(float64): float64;
transitioning javascript builtin
- MathTanh(context: Context, receiver: Object, x: Object): Number {
+ MathTanh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Tanh(value));
}
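
Every Math builtin above renames its receiver parameter to _receiver because the receiver is never read; only the argument is coerced through ToNumber. The observable behaviour, sketched in TypeScript:

  // Math builtins ignore their receiver and coerce the argument via ToNumber.
  console.log(Math.acos.call({ unused: 'receiver' }, '1' as any));  // 0
  console.log(Math.sqrt({ valueOf: () => 9 } as any));              // 3
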
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index ec65c78ee9..a359b2436f 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -62,7 +62,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
// -- a1 : target function (preserved for callee)
// -- a3 : new target (preserved for callee)
// -----------------------------------
@@ -70,14 +69,12 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the target function and the new target.
// Push function as parameter to the runtime call.
- __ SmiTag(a0);
- __ Push(a0, a1, a3, a1);
+ __ Push(a1, a3, a1);
__ CallRuntime(function_id, 1);
// Restore target function and new target.
- __ Pop(a0, a1, a3);
- __ SmiUntag(a0);
+ __ Pop(a1, a3);
}
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
@@ -853,13 +850,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee if needed, and caller)
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(
- !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, a1, a3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -1035,17 +1030,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lw(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+  // and update invocation count. Otherwise, set up the stack frame.
+ __ lw(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(closure);
-
-
// Increment invocation count for the function.
__ lw(t0, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
@@ -1053,10 +1049,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ sw(t0, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
- // Reset code age.
- DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
- __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kBytecodeAgeOffset));
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ __ bind(&push_stack_frame);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
+ // 8-bit fields next to each other, so we could just optimize by writing a
+  // 16-bit. These static asserts guard that our assumption is valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -1464,11 +1471,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
}
__ lw(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
__ Pop(t0);
__ Addu(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(ra);
- __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadEntryFromBuiltinIndex(t0);
__ Jump(t0);
}
} // namespace
@@ -2559,7 +2568,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ LoadRoot(t0, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ Branch(&okay, eq, t0, Operand(a2));
- __ stop("Unexpected pending exception");
+ __ stop();
__ bind(&okay);
}
@@ -2825,18 +2834,23 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK(function_address == a1 || function_address == a2);
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ li(t9, ExternalReference::is_profiling_address(isolate));
__ lb(t9, MemOperand(t9, 0));
- __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
-
- // Additional parameter is the address of the actual callback.
- __ li(t9, thunk_ref);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- __ mov(t9, function_address);
+ __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
+ __ li(t9, ExternalReference::address_of_runtime_stats_flag());
+ __ lw(t9, MemOperand(t9, 0));
+ __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
+ {
+ // Call the api function directly.
+ __ mov(t9, function_address);
+ __ Branch(&end_profiler_check);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ li(t9, thunk_ref);
+ }
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
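
The interpreter-entry hunks above reset the bytecode age and the OSR nesting level with a single 16-bit store, relying on the two 8-bit fields being adjacent (the STATIC_ASSERTs guard that layout). The same trick on a plain byte buffer, sketched in TypeScript with made-up offsets:

  // Two adjacent 8-bit fields, as with kOsrNestingLevelOffset followed by
  // kBytecodeAgeOffset (the numeric offsets here are invented for the sketch).
  const kOsrNestingLevelOffset = 0;
  const kBytecodeAgeOffset = 1;

  const fields = new Uint8Array([7, 3]);  // nesting level = 7, bytecode age = 3
  const fieldView = new DataView(fields.buffer);

  // One 16-bit store of zero clears both fields at once.
  fieldView.setUint16(kOsrNestingLevelOffset, 0, true);
  console.log(fields[kOsrNestingLevelOffset], fields[kBytecodeAgeOffset]);  // 0 0
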
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 34a5774d65..c5565b90de 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -62,7 +62,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
// -- a1 : target function (preserved for callee)
// -- a3 : new target (preserved for callee)
// -----------------------------------
@@ -70,13 +69,11 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
// Push a copy of the target function and the new target.
- __ SmiTag(a0);
- __ Push(a0, a1, a3, a1);
+ __ Push(a1, a3, a1);
__ CallRuntime(function_id, 1);
// Restore target function and new target.
- __ Pop(a0, a1, a3);
- __ SmiUntag(a0);
+ __ Pop(a1, a3);
}
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
@@ -870,13 +867,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee if needed, and caller)
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(
- !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, a1, a3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -1052,16 +1047,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+  // and update invocation count. Otherwise, set up the stack frame.
+ __ Ld(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
+ __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(closure);
-
// Increment invocation count for the function.
__ Lw(a4, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
@@ -1069,10 +1066,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Sw(a4, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
- // Reset code age.
- DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
- __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kBytecodeAgeOffset));
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ __ bind(&push_stack_frame);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
+ // 8-bit fields next to each other, so we could just optimize by writing a
+  // 16-bit. These static asserts guard that our assumption is valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -1479,11 +1487,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
}
__ Ld(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
__ Pop(t0);
__ Daddu(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(ra);
- __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadEntryFromBuiltinIndex(t0);
__ Jump(t0);
}
} // namespace
@@ -2595,7 +2605,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ LoadRoot(a4, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ Branch(&okay, eq, a4, Operand(a2));
- __ stop("Unexpected pending exception");
+ __ stop();
__ bind(&okay);
}
@@ -2864,18 +2874,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK(function_address == a1 || function_address == a2);
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ li(t9, ExternalReference::is_profiling_address(isolate));
__ Lb(t9, MemOperand(t9, 0));
- __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
-
- // Additional parameter is the address of the actual callback.
- __ li(t9, thunk_ref);
- __ jmp(&end_profiler_check);
+ __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
+ __ li(t9, ExternalReference::address_of_runtime_stats_flag());
+ __ Lw(t9, MemOperand(t9, 0));
+ __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
+ {
+ // Call the api function directly.
+ __ mov(t9, function_address);
+ __ Branch(&end_profiler_check);
+ }
- __ bind(&profiler_disabled);
- __ mov(t9, function_address);
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ li(t9, thunk_ref);
+ }
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq
index 93851d4e11..32115e78ea 100644
--- a/deps/v8/src/builtins/object-fromentries.tq
+++ b/deps/v8/src/builtins/object-fromentries.tq
@@ -33,8 +33,8 @@ namespace object {
}
transitioning javascript builtin
- ObjectFromEntries(implicit context: Context)(receiver: Object, ...arguments):
- Object {
+ ObjectFromEntries(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
const iterable: Object = arguments[0];
try {
if (IsNullOrUndefined(iterable)) goto Throw;
@@ -47,7 +47,8 @@ namespace object {
try {
assert(!IsNullOrUndefined(i.object));
while (true) {
- const step: Object = iterator::IteratorStep(i, fastIteratorResultMap)
+ const step: JSReceiver =
+ iterator::IteratorStep(i, fastIteratorResultMap)
otherwise return result;
const iteratorValue: Object =
iterator::IteratorValue(step, fastIteratorResultMap);
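
The Object.fromEntries hunk above threads the now object-typed step result through its fast-path loop. For reference, the loop's observable semantics can be sketched in TypeScript with the ordinary iteration protocol (fromEntriesSketch is illustrative only, not the builtin):

  // Walk the iterable of [key, value] pairs and copy them onto a fresh object.
  function fromEntriesSketch<V>(iterable: Iterable<[string, V]>): Record<string, V> {
    const result: Record<string, V> = {};
    for (const [key, value] of iterable) {  // each step result is an object pair
      result[key] = value;
    }
    return result;
  }

  const entries: Array<[string, number]> = [['a', 1], ['b', 2]];
  console.log(fromEntriesSketch(entries));  // { a: 1, b: 2 }
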
diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq
new file mode 100644
index 0000000000..6706a8f943
--- /dev/null
+++ b/deps/v8/src/builtins/object.tq
@@ -0,0 +1,138 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace runtime {
+ extern transitioning runtime
+ ObjectIsExtensible(implicit context: Context)(Object): Object;
+
+ extern transitioning runtime
+ JSReceiverPreventExtensionsThrow(implicit context: Context)(JSReceiver):
+ Object;
+
+ extern transitioning runtime
+ JSReceiverPreventExtensionsDontThrow(implicit context: Context)(JSReceiver):
+ Object;
+
+ extern transitioning runtime
+ JSReceiverGetPrototypeOf(implicit context: Context)(JSReceiver): Object;
+
+ extern transitioning runtime
+ JSReceiverSetPrototypeOfThrow(implicit context: Context)(JSReceiver, Object):
+ Object;
+
+ extern transitioning runtime
+ JSReceiverSetPrototypeOfDontThrow(implicit context:
+ Context)(JSReceiver, Object): Object;
+} // namespace runtime
+
+namespace object {
+ transitioning macro
+ ObjectIsExtensible(implicit context: Context)(object: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::ObjectIsExtensible(objectJSReceiver);
+ return proxy::ProxyIsExtensible(objectJSProxy);
+ }
+
+ transitioning macro
+ ObjectPreventExtensionsThrow(implicit context: Context)(object: Object):
+ Object {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverPreventExtensionsThrow(
+ objectJSReceiver);
+ proxy::ProxyPreventExtensions(objectJSProxy, True);
+ return objectJSReceiver;
+ }
+
+ transitioning macro
+ ObjectPreventExtensionsDontThrow(implicit context: Context)(object: Object):
+ Object {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverPreventExtensionsDontThrow(
+ objectJSReceiver);
+ return proxy::ProxyPreventExtensions(objectJSProxy, False);
+ }
+
+ transitioning macro
+ ObjectGetPrototypeOf(implicit context: Context)(object: Object): Object {
+ const objectJSReceiver: JSReceiver = ToObject_Inline(context, object);
+ return object::JSReceiverGetPrototypeOf(objectJSReceiver);
+ }
+
+ transitioning macro
+ JSReceiverGetPrototypeOf(implicit context: Context)(object: JSReceiver):
+ Object {
+ const objectJSProxy = Cast<JSProxy>(object)
+ otherwise return runtime::JSReceiverGetPrototypeOf(object);
+ return proxy::ProxyGetPrototypeOf(objectJSProxy);
+ }
+
+ transitioning macro
+ ObjectSetPrototypeOfThrow(implicit context: Context)(
+ object: Object, proto: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverSetPrototypeOfThrow(
+ objectJSReceiver, proto);
+ proxy::ProxySetPrototypeOf(objectJSProxy, proto, True);
+ return objectJSReceiver;
+ }
+
+ transitioning macro
+ ObjectSetPrototypeOfDontThrow(implicit context: Context)(
+ object: Object, proto: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverSetPrototypeOfDontThrow(
+ objectJSReceiver, proto);
+ return proxy::ProxySetPrototypeOf(objectJSProxy, proto, False);
+ }
+} // namespace object
+
+namespace object_isextensible {
+ // ES6 section 19.1.2.11 Object.isExtensible ( O )
+ transitioning javascript builtin ObjectIsExtensible(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ return object::ObjectIsExtensible(object);
+ }
+} // namespace object_isextensible
+
+namespace object_preventextensions {
+  // ES6 section 19.1.2.18 Object.preventExtensions ( O )
+ transitioning javascript builtin ObjectPreventExtensions(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ return object::ObjectPreventExtensionsThrow(object);
+ }
+} // namespace object_preventextensions
+
+namespace object_getprototypeof {
+ // ES6 section 19.1.2.9 Object.getPrototypeOf ( O )
+ transitioning javascript builtin ObjectGetPrototypeOf(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ return object::ObjectGetPrototypeOf(object);
+ }
+} // namespace object_getprototypeof
+
+namespace object_setprototypeof {
+ // ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto )
+ transitioning javascript builtin ObjectSetPrototypeOf(
+ js-implicit context:
+ Context)(_receiver: Object, object: Object, proto: Object): Object {
+ // 1. Set O to ? RequireObjectCoercible(O).
+ RequireObjectCoercible(object, 'Object.setPrototypeOf');
+
+ // 2. If Type(proto) is neither Object nor Null, throw a TypeError
+ // exception.
+ // 3. If Type(O) is not Object, return O.
+ // 4. Let status be ? O.[[SetPrototypeOf]](proto).
+ // 5. If status is false, throw a TypeError exception.
+ // 6. Return O.
+ if (proto == Null || Is<JSReceiver>(proto)) {
+ return object::ObjectSetPrototypeOfThrow(object, proto);
+ }
+ ThrowTypeError(kProtoObjectOrNull, proto);
+ }
+} // namespace object_setprototypeof
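
The new object.tq dispatches Object.isExtensible, preventExtensions, getPrototypeOf and setPrototypeOf either to the runtime or to the matching proxy trap, and ObjectSetPrototypeOf spells out the spec steps in comments. A short TypeScript sketch of the JS-observable behaviour those steps describe:

  // Primitives come back unchanged after the RequireObjectCoercible check,
  // invalid prototypes throw, and proxies go through their trap.
  console.log(Object.setPrototypeOf(42, null));  // 42 - non-object O is returned as-is
  try {
    Object.setPrototypeOf({}, 42 as any);        // proto must be an object or null
  } catch (e) {
    console.log(e instanceof TypeError);         // true
  }

  const target = {};
  const proxy = new Proxy(target, {
    setPrototypeOf(t, proto) {                   // proxy trap observed by the builtin
      console.log('trap called');
      return Reflect.setPrototypeOf(t, proto);
    }
  });
  Object.setPrototypeOf(proxy, null);            // logs "trap called"
  console.log(Object.getPrototypeOf(target));    // null
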
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index e3c6ce6407..a42cb9bebd 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -60,24 +60,20 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
- // -- r3 : argument count (preserved for callee)
// -- r4 : target function (preserved for callee)
// -- r6 : new target (preserved for callee)
// -----------------------------------
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push the number of arguments to the callee.
// Push a copy of the target function and the new target.
// Push function as parameter to the runtime call.
- __ SmiTag(r3);
- __ Push(r3, r4, r6, r4);
+ __ Push(r4, r6, r4);
__ CallRuntime(function_id, 1);
__ mr(r5, r3);
// Restore target function and new target.
- __ Pop(r3, r4, r6);
- __ SmiUntag(r3);
+ __ Pop(r4, r6);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ JumpCodeObject(r5);
@@ -110,6 +106,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ Register scratch = r5;
+
Label stack_overflow;
Generate_StackOverflowCheck(masm, r3, r8, &stack_overflow);
@@ -141,13 +139,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[2*kPointerSize]: context
// -----------------------------------
__ beq(&no_args, cr0);
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ sub(sp, sp, ip);
+ __ ShiftLeftImm(scratch, r3, Operand(kPointerSizeLog2));
+ __ sub(sp, sp, scratch);
__ mtctr(r3);
__ bind(&loop);
- __ subi(ip, ip, Operand(kPointerSize));
- __ LoadPX(r0, MemOperand(r7, ip));
- __ StorePX(r0, MemOperand(sp, ip));
+ __ subi(scratch, scratch, Operand(kPointerSize));
+ __ LoadPX(r0, MemOperand(r7, scratch));
+ __ StorePX(r0, MemOperand(sp, scratch));
__ bdnz(&loop);
__ bind(&no_args);
@@ -300,13 +298,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -----------------------------------
__ cmpi(r3, Operand::Zero());
__ beq(&no_args);
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ sub(sp, sp, ip);
+ __ ShiftLeftImm(r9, r3, Operand(kPointerSizeLog2));
+ __ sub(sp, sp, r9);
__ mtctr(r3);
__ bind(&loop);
- __ subi(ip, ip, Operand(kPointerSize));
- __ LoadPX(r0, MemOperand(r7, ip));
- __ StorePX(r0, MemOperand(sp, ip));
+ __ subi(r9, r9, Operand(kPointerSize));
+ __ LoadPX(r0, MemOperand(r7, r9));
+ __ StorePX(r0, MemOperand(sp, r9));
__ bdnz(&loop);
__ bind(&no_args);
@@ -416,12 +414,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
+ Register scratch = r8;
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- __ Move(ip, debug_hook);
- __ LoadByte(ip, MemOperand(ip), r0);
- __ extsb(ip, ip);
- __ CmpSmiLiteral(ip, Smi::zero(), r0);
+ __ Move(scratch, debug_hook);
+ __ LoadByte(scratch, MemOperand(scratch), r0);
+ __ extsb(scratch, scratch);
+ __ CmpSmiLiteral(scratch, Smi::zero(), r0);
__ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
@@ -429,9 +428,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ Move(ip, debug_suspended_generator);
- __ LoadP(ip, MemOperand(ip));
- __ cmp(ip, r4);
+ __ Move(scratch, debug_suspended_generator);
+ __ LoadP(scratch, MemOperand(scratch));
+ __ cmp(scratch, r4);
__ beq(&prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
@@ -442,8 +441,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&stack_overflow);
// Push receiver.
- __ LoadP(ip, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
- __ Push(ip);
+ __ LoadP(scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
+ __ Push(scratch);
// ----------- S t a t e -------------
// -- r4 : the JSGeneratorObject to resume
@@ -470,8 +469,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mtctr(r6);
__ bind(&loop);
- __ LoadPU(ip, MemOperand(r9, kPointerSize));
- __ push(ip);
+ __ LoadPU(scratch, MemOperand(r9, kPointerSize));
+ __ push(scratch);
__ bdnz(&loop);
__ bind(&done_loop);
@@ -602,6 +601,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ LoadP(r0, MemOperand(r3));
__ push(r0);
+ Register scratch = r9;
// Set up frame pointer for the frame to be pushed.
__ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -611,17 +611,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
masm->isolate());
__ Move(r3, js_entry_sp);
- __ LoadP(r9, MemOperand(r3));
- __ cmpi(r9, Operand::Zero());
+ __ LoadP(scratch, MemOperand(r3));
+ __ cmpi(scratch, Operand::Zero());
__ bne(&non_outermost_js);
__ StoreP(fp, MemOperand(r3));
- __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ bind(&non_outermost_js);
- __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
- __ push(ip); // frame-type
+ __ push(scratch); // frame-type
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -642,12 +642,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- __ Move(ip,
- ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
- masm->isolate()));
+ __ Move(scratch,
+ ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
}
- __ StoreP(r3, MemOperand(ip));
+ __ StoreP(r3, MemOperand(scratch));
__ LoadRoot(r3, RootIndex::kException);
__ b(&exit);
@@ -679,16 +679,16 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ pop(r8);
__ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ bne(&non_outermost_js_2);
- __ mov(r9, Operand::Zero());
+ __ mov(scratch, Operand::Zero());
__ Move(r8, js_entry_sp);
- __ StoreP(r9, MemOperand(r8));
+ __ StoreP(scratch, MemOperand(r8));
__ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(r6);
- __ Move(ip, ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, masm->isolate()));
- __ StoreP(r6, MemOperand(ip));
+ __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ StoreP(r6, MemOperand(scratch));
// Reset the stack to the callee saved registers.
__ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -894,13 +894,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee if needed, and caller)
- // -- r3 : new target (preserved for callee if needed, and caller)
- // -- r1 : target function (preserved for callee if needed, and caller)
+ // -- r6 : new target (preserved for callee if needed, and caller)
+ // -- r4 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(
- !AreAliased(feedback_vector, r3, r4, r6, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, r4, r6, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -1084,6 +1082,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+  // and update invocation count. Otherwise, set up the stack frame.
+ __ LoadP(r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadHalfWord(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
+ __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
+ __ bne(&push_stack_frame);
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
@@ -1102,6 +1109,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
+
+ __ bind(&push_stack_frame);
+
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1109,12 +1119,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ li(r8, Operand(0));
__ StoreHalfWord(r8,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset),
+ BytecodeArray::kOsrNestingLevelOffset),
r0);
// Load initial bytecode offset.
@@ -1395,11 +1405,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
__ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(ip, ip, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2));
__ LoadPX(kJavaScriptCallCodeStartRegister,
- MemOperand(kInterpreterDispatchTableRegister, ip));
+ MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
}
@@ -1526,13 +1538,17 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ LoadP(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
- __ Pop(ip);
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
+ UseScratchRegisterScope temps(masm);
+ Register builtin = temps.Acquire();
+ __ Pop(builtin);
__ addi(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(r0);
__ mtlr(r0);
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ __ LoadEntryFromBuiltinIndex(builtin);
+ __ Jump(builtin);
}
} // namespace
@@ -1702,14 +1718,15 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r3: actual number of arguments
// r4: callable
{
+ Register scratch = r6;
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ add(r5, sp, r5);
__ mtctr(r3);
__ bind(&loop);
- __ LoadP(ip, MemOperand(r5, -kPointerSize));
- __ StoreP(ip, MemOperand(r5));
+ __ LoadP(scratch, MemOperand(r5, -kPointerSize));
+ __ StoreP(scratch, MemOperand(r5));
__ subi(r5, r5, Operand(kPointerSize));
__ bdnz(&loop);
// Adjust the actual number of arguments and remove the top element
@@ -1891,7 +1908,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Check for stack overflow.
Label stack_overflow;
- Generate_StackOverflowCheck(masm, r7, ip, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r7, scratch, &stack_overflow);
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -1902,12 +1919,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ mtctr(r7);
__ bind(&loop);
- __ LoadPU(ip, MemOperand(r5, kPointerSize));
- __ CompareRoot(ip, RootIndex::kTheHoleValue);
+ __ LoadPU(scratch, MemOperand(r5, kPointerSize));
+ __ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip);
- __ LoadRoot(ip, RootIndex::kUndefinedValue);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ bind(&skip);
- __ push(ip);
+ __ push(scratch);
__ bdnz(&loop);
__ bind(&no_args);
__ add(r3, r3, r7);
@@ -1953,8 +1970,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmpi(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ LoadP(scratch,
+ MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ cmpi(scratch,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ beq(&arguments_adaptor);
{
__ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1988,9 +2007,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ add(r3, r3, r8);
__ bind(&loop);
{
- __ ShiftLeftImm(ip, r8, Operand(kPointerSizeLog2));
- __ LoadPX(ip, MemOperand(r7, ip));
- __ push(ip);
+ __ ShiftLeftImm(scratch, r8, Operand(kPointerSizeLog2));
+ __ LoadPX(scratch, MemOperand(r7, scratch));
+ __ push(scratch);
__ subi(r8, r8, Operand(1));
__ cmpi(r8, Operand::Zero());
__ bne(&loop);
@@ -2134,10 +2153,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- r7 : the number of [[BoundArguments]]
// -----------------------------------
+ Register scratch = r9;
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ mr(r9, sp); // preserve previous stack pointer
+ __ mr(scratch, sp); // preserve previous stack pointer
__ ShiftLeftImm(r10, r7, Operand(kPointerSizeLog2));
__ sub(sp, sp, r10);
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2146,7 +2166,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ CompareRoot(sp, RootIndex::kRealStackLimit);
__ bgt(&done); // Signed comparison.
// Restore the stack pointer.
- __ mr(sp, r9);
+ __ mr(sp, scratch);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2166,7 +2186,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ beq(&skip);
__ mtctr(r3);
__ bind(&loop);
- __ LoadPX(r0, MemOperand(r9, r8));
+ __ LoadPX(r0, MemOperand(scratch, r8));
__ StorePX(r0, MemOperand(sp, r8));
__ addi(r8, r8, Operand(kPointerSize));
__ bdnz(&loop);
@@ -2201,9 +2221,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r4);
// Patch the receiver to [[BoundThis]].
- __ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
+ __ LoadP(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ StorePX(ip, MemOperand(sp, r0));
+ __ StorePX(r6, MemOperand(sp, r0));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
@@ -2388,7 +2408,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmpli(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ beq(&dont_adapt_arguments);
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r7, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
r0);
__ bne(&skip_adapt_arguments, cr0);
@@ -2686,7 +2706,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ CompareRoot(r6, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ beq(&okay);
- __ stop("Unexpected pending exception");
+ __ stop();
__ bind(&okay);
}
@@ -2961,13 +2981,22 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Move(scratch, thunk_ref);
__ isel(eq, scratch, function_address, scratch);
} else {
- Label profiler_disabled;
- Label end_profiler_check;
- __ beq(&profiler_disabled);
- __ Move(scratch, thunk_ref);
- __ b(&end_profiler_check);
- __ bind(&profiler_disabled);
- __ mr(scratch, function_address);
+ Label profiler_enabled, end_profiler_check;
+ __ bne(&profiler_enabled);
+ __ Move(scratch, ExternalReference::address_of_runtime_stats_flag());
+ __ lwz(scratch, MemOperand(scratch, 0));
+ __ cmpi(scratch, Operand::Zero());
+ __ bne(&profiler_enabled);
+ {
+ // Call the api function directly.
+ __ mr(scratch, function_address);
+ __ b(&end_profiler_check);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ Move(scratch, thunk_ref);
+ }
__ bind(&end_profiler_check);
}
@@ -3264,6 +3293,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
}
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ Register temp2 = temps.Acquire();
// Place the return address on the stack, making the call
// GC safe. The RegExp backend also relies on this.
__ mflr(r0);
@@ -3271,11 +3302,11 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
if (ABI_USES_FUNCTION_DESCRIPTORS && FLAG_embedded_builtins) {
// AIX/PPC64BE Linux use a function descriptor;
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
- __ LoadP(ip, MemOperand(ip, 0)); // Instruction address
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(temp2, kPointerSize));
+ __ LoadP(temp2, MemOperand(temp2, 0)); // Instruction address
}
- __ Call(ip); // Call the C++ function.
+ __ Call(temp2); // Call the C++ function.
__ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ mtlr(r0);
__ blr();
diff --git a/deps/v8/src/builtins/proxy-constructor.tq b/deps/v8/src/builtins/proxy-constructor.tq
index 178759b595..ad60c20e2c 100644
--- a/deps/v8/src/builtins/proxy-constructor.tq
+++ b/deps/v8/src/builtins/proxy-constructor.tq
@@ -6,17 +6,14 @@
namespace proxy {
- extern macro ProxiesCodeStubAssembler::GetProxyConstructorJSNewTarget():
- Object;
-
// ES #sec-proxy-constructor
// https://tc39.github.io/ecma262/#sec-proxy-constructor
transitioning javascript builtin
- ProxyConstructor(implicit context: Context)(
- receiver: Object, target: Object, handler: Object): JSProxy {
+ ProxyConstructor(
+ js-implicit context: Context, receiver: Object,
+ newTarget: Object)(target: Object, handler: Object): JSProxy {
try {
// 1. If NewTarget is undefined, throw a TypeError exception.
- const newTarget: Object = GetProxyConstructorJSNewTarget();
if (newTarget == Undefined) {
ThrowTypeError(kConstructorNotFunction, 'Proxy');
}
diff --git a/deps/v8/src/builtins/proxy-delete-property.tq b/deps/v8/src/builtins/proxy-delete-property.tq
new file mode 100644
index 0000000000..759de766ef
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-delete-property.tq
@@ -0,0 +1,67 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-delete-p
+ // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-delete-p
+ transitioning builtin
+ ProxyDeleteProperty(implicit context: Context)(
+ proxy: JSProxy, name: Name, languageMode: LanguageMode): Object {
+ const kTrapName: constexpr string = 'deleteProperty';
+ // 1. Assert: IsPropertyKey(P) is true.
+ assert(TaggedIsNotSmi(name));
+ assert(IsName(name));
+ assert(!IsPrivateSymbol(name));
+
+ try {
+ // 2. Let handler be O.[[ProxyHandler]].
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = UnsafeCast<JSReceiver>(proxy.target);
+
+ // 6. Let trap be ? GetMethod(handler, "deleteProperty").
+ // 7. If trap is undefined, then (see 7.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
+
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
+ // « target, P »)).
+ const trapResult = Call(context, trap, handler, target, name);
+
+ // 9. If booleanTrapResult is false, return false.
+ if (BranchIfToBooleanIsFalse(trapResult)) {
+ if (languageMode == SmiConstant(kStrict)) {
+ ThrowTypeError(kProxyTrapReturnedFalsishFor, kTrapName, name);
+ }
+ return False;
+ }
+
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ // 11. If targetDesc is undefined, return true.
+ // 12. If targetDesc.[[Configurable]] is false, throw a TypeError
+ // exception.
+ // 13. Let extensibleTarget be ? IsExtensible(target).
+ // 14. If extensibleTarget is false, throw a TypeError exception.
+ CheckDeleteTrapResult(target, proxy, name);
+
+ // 15. Return true.
+ return True;
+ }
+ label TrapUndefined(target: Object) {
+ // 7.a. Return ? target.[[Delete]](P).
+ return DeleteProperty(target, name, languageMode);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, kTrapName);
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/proxy-get-property.tq b/deps/v8/src/builtins/proxy-get-property.tq
index 0915a66d5f..bac07f550c 100644
--- a/deps/v8/src/builtins/proxy-get-property.tq
+++ b/deps/v8/src/builtins/proxy-get-property.tq
@@ -6,9 +6,8 @@
namespace proxy {
- extern transitioning runtime
- GetPropertyWithReceiver(implicit context: Context)(Object, Name, Object, Smi):
- Object;
+ extern transitioning builtin GetPropertyWithReceiver(
+ implicit context: Context)(Object, Name, Object, Smi): Object;
// ES #sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
// https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
@@ -16,36 +15,38 @@ namespace proxy {
ProxyGetProperty(implicit context: Context)(
proxy: JSProxy, name: Name, receiverValue: Object,
onNonExistent: Smi): Object {
+ PerformStackCheck();
// 1. Assert: IsPropertyKey(P) is true.
assert(TaggedIsNotSmi(name));
assert(IsName(name));
assert(!IsPrivateSymbol(name));
// 2. Let handler be O.[[ProxyHandler]].
- const handler: Object = proxy.handler;
-
// 3. If handler is null, throw a TypeError exception.
- if (handler == Null) {
- ThrowTypeError(kProxyRevoked, 'get');
- }
-
// 4. Assert: Type(handler) is Object.
- const handlerJSReceiver = UnsafeCast<JSReceiver>(handler);
+ let handler: JSReceiver;
+ typeswitch (proxy.handler) {
+ case (Null): {
+ ThrowTypeError(kProxyRevoked, 'get');
+ }
+ case (h: JSReceiver): {
+ handler = h;
+ }
+ }
// 5. Let target be O.[[ProxyTarget]].
- const target = proxy.target;
+ const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;
// 6. Let trap be ? GetMethod(handler, "get").
// 7. If trap is undefined, then (see 7.a below).
// 7.a. Return ? target.[[Get]](P, Receiver).
- // TODO(mslekova): Introduce GetPropertyWithReceiver stub
- const trap: Callable = GetMethod(handlerJSReceiver, 'get')
+ const trap: Callable = GetMethod(handler, 'get')
otherwise return GetPropertyWithReceiver(
target, name, receiverValue, onNonExistent);
// 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
const trapResult =
- Call(context, trap, handlerJSReceiver, target, name, receiverValue);
+ Call(context, trap, handler, target, name, receiverValue);
// 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
// 10. If targetDesc is not undefined and targetDesc.[[Configurable]] is
@@ -58,6 +59,7 @@ namespace proxy {
// is undefined, then
// i. If trapResult is not undefined, throw a TypeError exception.
// 11. Return trapResult.
- return CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
+ CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
+ return trapResult;
}
}
diff --git a/deps/v8/src/builtins/proxy-get-prototype-of.tq b/deps/v8/src/builtins/proxy-get-prototype-of.tq
new file mode 100644
index 0000000000..2418eaf423
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-get-prototype-of.tq
@@ -0,0 +1,70 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+  // ES #sec-proxy-object-internal-methods-and-internal-slots-getprototypeof
+  // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-getprototypeof
+ transitioning builtin
+ ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): Object {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'getPrototypeOf';
+ try {
+ // 1. Let handler be O.[[ProxyHandler]].
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 4. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
+ // 6. If trap is undefined, then (see 6.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
+
+ // 7. Let handlerProto be ? Call(trap, handler, « target »).
+ const handlerProto = Call(context, trap, handler, target);
+
+ // 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError
+ // exception.
+ if (!Is<JSReceiver>(handlerProto)) {
+ goto ThrowProxyGetPrototypeOfInvalid;
+ }
+
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ // 10. If extensibleTarget is true, return handlerProto.
+ const extensibleTarget: Object = object::ObjectIsExtensible(target);
+ assert(extensibleTarget == True || extensibleTarget == False);
+ if (extensibleTarget == True) {
+ return handlerProto;
+ }
+
+ // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+ const targetProto = object::ObjectGetPrototypeOf(target);
+
+ // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError
+ // exception.
+ // 13. Return handlerProto.
+ if (BranchIfSameValue(targetProto, handlerProto)) {
+ return handlerProto;
+ }
+ ThrowTypeError(kProxyGetPrototypeOfNonExtensible);
+ }
+ label TrapUndefined(target: Object) {
+ // 6.a. Return ? target.[[GetPrototypeOf]]().
+ return object::ObjectGetPrototypeOf(target);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, kTrapName);
+ }
+ label ThrowProxyGetPrototypeOfInvalid deferred {
+ ThrowTypeError(kProxyGetPrototypeOfInvalid);
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/proxy-has-property.tq b/deps/v8/src/builtins/proxy-has-property.tq
index ab3898a9c7..ee394c5d84 100644
--- a/deps/v8/src/builtins/proxy-has-property.tq
+++ b/deps/v8/src/builtins/proxy-has-property.tq
@@ -22,11 +22,12 @@ namespace proxy {
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
// 5. Let target be O.[[ProxyTarget]].
- const target = proxy.target;
+ const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;
// 6. Let trap be ? GetMethod(handler, "has").
// 7. If trap is undefined, then (see 7.a below).
@@ -42,7 +43,8 @@ namespace proxy {
if (BranchIfToBooleanIsTrue(trapResult)) {
return True;
}
- return CheckHasTrapResult(target, proxy, name);
+ CheckHasTrapResult(target, proxy, name);
+ return False;
}
label TrapUndefined(target: Object) {
// 7.a. Return ? target.[[HasProperty]](P).
diff --git a/deps/v8/src/builtins/proxy-is-extensible.tq b/deps/v8/src/builtins/proxy-is-extensible.tq
new file mode 100644
index 0000000000..82f4a5b955
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-is-extensible.tq
@@ -0,0 +1,56 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-isextensible
+ // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-isextensible
+ transitioning builtin ProxyIsExtensible(implicit context:
+ Context)(proxy: JSProxy): Object {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'isExtensible';
+ try {
+ // 1. Let handler be O.[[ProxyHandler]].
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 4. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 5. Let trap be ? GetMethod(handler, "isExtensible").
+ // 6. If trap is undefined, then (see 6.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
+
+ // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «
+ // target»)).
+ const trapResult = ToBoolean(Call(context, trap, handler, target));
+
+ // 8. Let targetResult be ? IsExtensible(target).
+ const targetResult: bool = ToBoolean(object::ObjectIsExtensible(target));
+
+ // 9. If SameValue(booleanTrapResult, targetResult) is false, throw a
+ // TypeError exception.
+ if (trapResult != targetResult) {
+ ThrowTypeError(
+ kProxyIsExtensibleInconsistent,
+ SelectBooleanConstant(targetResult));
+ }
+ // 10. Return booleanTrapResult.
+ return SelectBooleanConstant(trapResult);
+ }
+ label TrapUndefined(target: Object) {
+ // 6.a. Return ? IsExtensible(target).
+ return object::ObjectIsExtensible(target);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, kTrapName);
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/proxy-prevent-extensions.tq b/deps/v8/src/builtins/proxy-prevent-extensions.tq
new file mode 100644
index 0000000000..6d5d2569fb
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-prevent-extensions.tq
@@ -0,0 +1,66 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-preventextensions
+ // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-preventextensions
+ transitioning builtin
+ ProxyPreventExtensions(implicit context: Context)(
+ proxy: JSProxy, doThrow: Boolean): Object {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'preventExtensions';
+ try {
+ // 1. Let handler be O.[[ProxyHandler]].
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 4. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 5. Let trap be ? GetMethod(handler, "preventExtensions").
+ // 6. If trap is undefined, then (see 6.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
+
+ // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «
+ // target»)).
+ const trapResult = Call(context, trap, handler, target);
+
+ // 8. If booleanTrapResult is true, then
+ // 8.a. Let extensibleTarget be ? IsExtensible(target).
+ // 8.b If extensibleTarget is true, throw a TypeError exception.
+ if (BranchIfToBooleanIsTrue(trapResult)) {
+ const extensibleTarget: Object = object::ObjectIsExtensible(target);
+ assert(extensibleTarget == True || extensibleTarget == False);
+ if (extensibleTarget == True) {
+ ThrowTypeError(kProxyPreventExtensionsExtensible);
+ }
+ } else {
+ if (doThrow == True) {
+ ThrowTypeError(kProxyTrapReturnedFalsish, kTrapName);
+ }
+ return False;
+ }
+
+ // 9. Return booleanTrapResult.
+ return True;
+ }
+ label TrapUndefined(target: Object) {
+ // 6.a. Return ? target.[[PreventExtensions]]().
+ if (doThrow == True) {
+ return object::ObjectPreventExtensionsThrow(target);
+ }
+ return object::ObjectPreventExtensionsDontThrow(target);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, kTrapName);
+ }
+ }
+} // namespace proxy
diff --git a/deps/v8/src/builtins/proxy-revocable.tq b/deps/v8/src/builtins/proxy-revocable.tq
index 695f005c9b..b09baab9cf 100644
--- a/deps/v8/src/builtins/proxy-revocable.tq
+++ b/deps/v8/src/builtins/proxy-revocable.tq
@@ -7,17 +7,13 @@
namespace proxy {
extern macro ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(
- Object, Object): JSFunction;
- macro AllocateProxyRevokeFunction(implicit context: Context)(proxy: JSProxy):
- JSFunction {
- return AllocateProxyRevokeFunction(proxy, context);
- }
+ implicit context: Context)(JSProxy): JSFunction;
// Proxy.revocable(target, handler)
// https://tc39.github.io/ecma262/#sec-proxy.revocable
transitioning javascript builtin
ProxyRevocable(
- context: Context, receiver: Object, target: Object,
+ context: Context, _receiver: Object, target: Object,
handler: Object): JSProxyRevocableResult {
try {
const targetJSReceiver =
diff --git a/deps/v8/src/builtins/proxy-revoke.tq b/deps/v8/src/builtins/proxy-revoke.tq
index 400f586b21..d89b54077a 100644
--- a/deps/v8/src/builtins/proxy-revoke.tq
+++ b/deps/v8/src/builtins/proxy-revoke.tq
@@ -9,7 +9,7 @@ namespace proxy {
// Proxy Revocation Functions
// https://tc39.github.io/ecma262/#sec-proxy-revocation-functions
transitioning javascript builtin
- ProxyRevoke(implicit context: Context)(): Undefined {
+ ProxyRevoke(js-implicit context: Context)(): Undefined {
// 1. Let p be F.[[RevocableProxy]].
const proxyObject: Object = context[PROXY_SLOT];
diff --git a/deps/v8/src/builtins/proxy-set-property.tq b/deps/v8/src/builtins/proxy-set-property.tq
index 72181e08a8..d0411a8e89 100644
--- a/deps/v8/src/builtins/proxy-set-property.tq
+++ b/deps/v8/src/builtins/proxy-set-property.tq
@@ -30,21 +30,20 @@ namespace proxy {
return Undefined;
}
- // 2. Let handler be O.[[ProxyHandler]].
- const handler: Object = proxy.handler;
-
try {
+ // 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
- const handlerJSReceiver =
- Cast<JSReceiver>(handler) otherwise ThrowProxyHandlerRevoked;
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
// 5. Let target be O.[[ProxyTarget]].
- const target = proxy.target;
+ const target = UnsafeCast<JSReceiver>(proxy.target);
// 6. Let trap be ? GetMethod(handler, "set").
// 7. If trap is undefined, then (see 7.a below).
- const trap: Callable = GetMethod(handlerJSReceiver, 'set')
+ const trap: Callable = GetMethod(handler, 'set')
otherwise goto TrapUndefined(target);
// 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
@@ -61,11 +60,11 @@ namespace proxy {
// i. If targetDesc.[[Set]] is undefined, throw a TypeError
// exception.
// 12. Return true.
- const trapResult = Call(
- context, trap, handlerJSReceiver, target, name, value, receiverValue);
+ const trapResult =
+ Call(context, trap, handler, target, name, value, receiverValue);
if (BranchIfToBooleanIsTrue(trapResult)) {
- return CheckGetSetTrapResult(
- target, proxy, name, trapResult, kProxySet);
+ CheckGetSetTrapResult(target, proxy, name, value, kProxySet);
+ return value;
}
ThrowTypeErrorIfStrict(
SmiConstant(kProxyTrapReturnedFalsishFor), 'set', name);
@@ -77,7 +76,6 @@ namespace proxy {
return value;
}
label ThrowProxyHandlerRevoked deferred {
- assert(handler == Null);
ThrowTypeError(kProxyRevoked, 'set');
}
}
diff --git a/deps/v8/src/builtins/proxy-set-prototype-of.tq b/deps/v8/src/builtins/proxy-set-prototype-of.tq
new file mode 100644
index 0000000000..bbd99be411
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-set-prototype-of.tq
@@ -0,0 +1,77 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
+ // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
+ transitioning builtin
+ ProxySetPrototypeOf(implicit context: Context)(
+ proxy: JSProxy, proto: Object, doThrow: Boolean): Object {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'setPrototypeOf';
+ try {
+ // 1. Assert: Either Type(V) is Object or Type(V) is Null.
+ assert(proto == Null || Is<JSReceiver>(proto));
+
+ // 2. Let handler be O.[[ProxyHandler]].
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 6. Let trap be ? GetMethod(handler, "setPrototypeOf").
+ // 7. If trap is undefined, then (see 7.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target, proto);
+
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, « target, V
+ // »)).
+ const trapResult = Call(context, trap, handler, target, proto);
+
+ // 9. If booleanTrapResult is false, return false.
+ if (BranchIfToBooleanIsFalse(trapResult)) {
+ if (doThrow == True) {
+ ThrowTypeError(kProxyTrapReturnedFalsishFor, kTrapName);
+ }
+ return False;
+ }
+
+ // 10. Let extensibleTarget be ? IsExtensible(target).
+ // 11. If extensibleTarget is true, return true.
+ const extensibleTarget: Object = object::ObjectIsExtensible(target);
+ assert(extensibleTarget == True || extensibleTarget == False);
+ if (extensibleTarget == True) {
+ return True;
+ }
+
+ // 12. Let targetProto be ? target.[[GetPrototypeOf]]().
+ const targetProto = object::ObjectGetPrototypeOf(target);
+
+ // 13. If SameValue(V, targetProto) is false, throw a TypeError
+ // exception.
+ // 14. Return true.
+ if (BranchIfSameValue(proto, targetProto)) {
+ return True;
+ }
+ ThrowTypeError(kProxySetPrototypeOfNonExtensible);
+ }
+ label TrapUndefined(target: Object, proto: Object) {
+      // 7.a. Return ? target.[[SetPrototypeOf]](V).
+ if (doThrow == True) {
+ return object::ObjectSetPrototypeOfThrow(target, proto);
+ }
+ return object::ObjectSetPrototypeOfDontThrow(target, proto);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, kTrapName);
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/proxy.tq b/deps/v8/src/builtins/proxy.tq
index 16bba85292..d95def5d0e 100644
--- a/deps/v8/src/builtins/proxy.tq
+++ b/deps/v8/src/builtins/proxy.tq
@@ -7,25 +7,23 @@
namespace proxy {
extern macro ProxiesCodeStubAssembler::AllocateProxy(
- JSReceiver, JSReceiver, Context): JSProxy;
- macro AllocateProxy(implicit context: Context)(
- target: JSReceiver, handler: JSReceiver): JSProxy {
- return AllocateProxy(target, handler, context);
- }
+ implicit context: Context)(JSReceiver, JSReceiver): JSProxy;
macro IsRevokedProxy(implicit context: Context)(o: JSReceiver): bool {
const proxy: JSProxy = Cast<JSProxy>(o) otherwise return false;
- const handler: JSReceiver =
- Cast<JSReceiver>(proxy.handler) otherwise return true;
+ Cast<JSReceiver>(proxy.handler) otherwise return true;
return false;
}
extern transitioning macro ProxiesCodeStubAssembler::CheckGetSetTrapResult(
implicit context:
- Context)(Object, JSProxy, Name, Object, constexpr int31): Object;
+ Context)(JSReceiver, JSProxy, Name, Object, constexpr int31);
+
+ extern transitioning macro ProxiesCodeStubAssembler::CheckDeleteTrapResult(
+ implicit context: Context)(JSReceiver, JSProxy, Name);
extern transitioning macro ProxiesCodeStubAssembler::CheckHasTrapResult(
- implicit context: Context)(Object, JSProxy, Name): Object;
+ implicit context: Context)(JSReceiver, JSProxy, Name);
const kProxyNonObject: constexpr MessageTemplate
generates 'MessageTemplate::kProxyNonObject';
@@ -37,6 +35,20 @@ namespace proxy {
generates 'MessageTemplate::kProxyTrapReturnedFalsishFor';
const kProxyPrivate: constexpr MessageTemplate
generates 'MessageTemplate::kProxyPrivate';
+ const kProxyIsExtensibleInconsistent: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyIsExtensibleInconsistent';
+ const kProxyPreventExtensionsExtensible: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyPreventExtensionsExtensible';
+ const kProxyTrapReturnedFalsish: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyTrapReturnedFalsish';
+ const kProxyGetPrototypeOfInvalid: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyGetPrototypeOfInvalid';
+ const kProxyGetPrototypeOfNonExtensible: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyGetPrototypeOfNonExtensible';
+ const kProxySetPrototypeOfNonExtensible: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxySetPrototypeOfNonExtensible';
+ const kProxyDeletePropertyNonExtensible: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyDeletePropertyNonExtensible';
const kProxyGet: constexpr int31
generates 'JSProxy::AccessKind::kGet';
diff --git a/deps/v8/src/builtins/reflect.tq b/deps/v8/src/builtins/reflect.tq
new file mode 100644
index 0000000000..4c25e8338f
--- /dev/null
+++ b/deps/v8/src/builtins/reflect.tq
@@ -0,0 +1,82 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace reflect {
+
+ const kCalledOnNonObject: constexpr MessageTemplate
+ generates 'MessageTemplate::kCalledOnNonObject';
+
+ // ES6 section 26.1.10 Reflect.isExtensible
+ transitioning javascript builtin ReflectIsExtensible(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.isExtensible');
+ return object::ObjectIsExtensible(objectJSReceiver);
+ }
+
+ // ES6 section 26.1.12 Reflect.preventExtensions
+ transitioning javascript builtin ReflectPreventExtensions(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.preventExtensions');
+ return object::ObjectPreventExtensionsDontThrow(objectJSReceiver);
+ }
+
+ // ES6 section 26.1.8 Reflect.getPrototypeOf
+ transitioning javascript builtin ReflectGetPrototypeOf(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.getPrototypeOf');
+ return object::JSReceiverGetPrototypeOf(objectJSReceiver);
+ }
+
+ // ES6 section 26.1.14 Reflect.setPrototypeOf
+ transitioning javascript builtin ReflectSetPrototypeOf(
+ js-implicit context:
+ Context)(_receiver: Object, object: Object, proto: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.setPrototypeOf');
+ if (proto == Null || Is<JSReceiver>(proto)) {
+ return object::ObjectSetPrototypeOfDontThrow(objectJSReceiver, proto);
+ }
+ ThrowTypeError(kProtoObjectOrNull, proto);
+ }
+
+ extern transitioning builtin ToName(implicit context: Context)(Object): Name;
+ type OnNonExistent constexpr 'OnNonExistent';
+ const kReturnUndefined: constexpr OnNonExistent
+ generates 'OnNonExistent::kReturnUndefined';
+ extern macro SmiConstant(constexpr OnNonExistent): Smi;
+ extern transitioning builtin GetPropertyWithReceiver(
+ implicit context: Context)(Object, Name, Object, Smi): Object;
+
+ // ES6 section 26.1.6 Reflect.get
+ transitioning javascript builtin
+ ReflectGet(js-implicit context: Context)(...arguments): Object {
+ const length = arguments.length;
+ const object: Object = length > 0 ? arguments[0] : Undefined;
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.get');
+ const propertyKey: Object = length > 1 ? arguments[1] : Undefined;
+ const name: Name = ToName(propertyKey);
+ const receiver: Object = length > 2 ? arguments[2] : objectJSReceiver;
+ return GetPropertyWithReceiver(
+ objectJSReceiver, name, receiver, SmiConstant(kReturnUndefined));
+ }
+
+ // ES6 section 26.1.4 Reflect.deleteProperty
+ transitioning javascript builtin ReflectDeleteProperty(
+ js-implicit context:
+ Context)(_receiver: Object, object: Object, key: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.deleteProperty');
+ const name: Name = ToName(key);
+ if (IsPrivateSymbol(name)) {
+ return DeleteProperty(objectJSReceiver, name, kSloppy);
+ }
+ const proxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return DeleteProperty(objectJSReceiver, name, kSloppy);
+ return proxy::ProxyDeleteProperty(proxy, name, kSloppy);
+ }
+} // namespace reflect
diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq
index 9b95f99f41..cb0038c6b6 100644
--- a/deps/v8/src/builtins/regexp-replace.tq
+++ b/deps/v8/src/builtins/regexp-replace.tq
@@ -22,7 +22,7 @@ namespace regexp_replace {
String, JSRegExp, Callable): String;
extern macro
- RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Number, bool): Smi;
+ RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi;
extern macro
RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast(
implicit context: Context)(JSReceiver, String):
@@ -72,8 +72,7 @@ namespace regexp_replace {
transitioning macro
RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)(
- matchesElements: FixedArray, matchesLength: intptr, string: String,
- replaceFn: Callable) {
+ matchesElements: FixedArray, matchesLength: intptr, replaceFn: Callable) {
for (let i: intptr = 0; i < matchesLength; i++) {
const elArray =
Cast<JSArray>(matchesElements.objects[i]) otherwise continue;
@@ -124,7 +123,7 @@ namespace regexp_replace {
matchesElements, matchesLengthInt, string, replaceFn);
} else {
RegExpReplaceCallableWithExplicitCaptures(
- matchesElements, matchesLengthInt, string, replaceFn);
+ matchesElements, matchesLengthInt, replaceFn);
}
return StringBuilderConcat(matches, matchesLength, string);
@@ -138,7 +137,7 @@ namespace regexp_replace {
let result: String = kEmptyString;
let lastMatchEnd: Smi = 0;
let unicode: bool = false;
- let replaceLength: Smi = replaceString.length_smi;
+ const replaceLength: Smi = replaceString.length_smi;
const global: bool = regexp.global;
if (global) {
@@ -209,7 +208,7 @@ namespace regexp_replace {
}
transitioning javascript builtin RegExpPrototypeReplace(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
const methodName: constexpr string = 'RegExp.prototype.@@replace';
// RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index bf8c0cb68a..854f31cece 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -60,24 +60,20 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
- // -- r2 : argument count (preserved for callee)
// -- r3 : target function (preserved for callee)
// -- r5 : new target (preserved for callee)
// -----------------------------------
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push the number of arguments to the callee.
// Push a copy of the target function and the new target.
// Push function as parameter to the runtime call.
- __ SmiTag(r2);
- __ Push(r2, r3, r5, r3);
+ __ Push(r3, r5, r3);
__ CallRuntime(function_id, 1);
__ LoadRR(r4, r2);
// Restore target function and new target.
- __ Pop(r2, r3, r5);
- __ SmiUntag(r2);
+ __ Pop(r3, r5);
}
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ JumpCodeObject(r4);
@@ -110,6 +106,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ Register scratch = r4;
Label stack_overflow;
Generate_StackOverflowCheck(masm, r2, r7, &stack_overflow);
@@ -138,13 +135,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
__ beq(&no_args);
- __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
- __ SubP(sp, sp, ip);
+ __ ShiftLeftP(scratch, r2, Operand(kPointerSizeLog2));
+ __ SubP(sp, sp, scratch);
__ LoadRR(r1, r2);
__ bind(&loop);
- __ lay(ip, MemOperand(ip, -kPointerSize));
- __ LoadP(r0, MemOperand(ip, r6));
- __ StoreP(r0, MemOperand(ip, sp));
+ __ lay(scratch, MemOperand(scratch, -kPointerSize));
+ __ LoadP(r0, MemOperand(scratch, r6));
+ __ StoreP(r0, MemOperand(scratch, sp));
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
@@ -159,15 +156,15 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Restore context from the frame.
__ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ LoadP(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ SmiToPtrArrayOffset(r3, r3);
- __ AddP(sp, sp, r3);
+ __ SmiToPtrArrayOffset(scratch, scratch);
+ __ AddP(sp, sp, scratch);
__ AddP(sp, sp, Operand(kPointerSize));
__ Ret();
@@ -296,13 +293,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ ltgr(r2, r2);
__ beq(&no_args);
- __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
- __ SubP(sp, sp, ip);
+ __ ShiftLeftP(r8, r2, Operand(kPointerSizeLog2));
+ __ SubP(sp, sp, r8);
__ LoadRR(r1, r2);
__ bind(&loop);
- __ lay(ip, MemOperand(ip, -kPointerSize));
- __ LoadP(r0, MemOperand(ip, r6));
- __ StoreP(r0, MemOperand(ip, sp));
+ __ lay(r8, MemOperand(r8, -kPointerSize));
+ __ LoadP(r0, MemOperand(r8, r6));
+ __ StoreP(r0, MemOperand(r8, sp));
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
@@ -409,11 +406,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
+ Register scratch = r7;
+
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- __ Move(ip, debug_hook);
- __ LoadB(ip, MemOperand(ip));
- __ CmpSmiLiteral(ip, Smi::zero(), r0);
+ __ Move(scratch, debug_hook);
+ __ LoadB(scratch, MemOperand(scratch));
+ __ CmpSmiLiteral(scratch, Smi::zero(), r0);
__ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
@@ -421,9 +420,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ Move(ip, debug_suspended_generator);
- __ LoadP(ip, MemOperand(ip));
- __ CmpP(ip, r3);
+ __ Move(scratch, debug_suspended_generator);
+ __ LoadP(scratch, MemOperand(scratch));
+ __ CmpP(scratch, r3);
__ beq(&prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
@@ -434,8 +433,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&stack_overflow);
// Push receiver.
- __ LoadP(ip, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
- __ Push(ip);
+ __ LoadP(scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+ __ Push(scratch);
// ----------- S t a t e -------------
// -- r3 : the JSGeneratorObject to resume
@@ -626,6 +625,9 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
IsolateAddressId::kCEntryFPAddress, masm->isolate()));
__ LoadP(r6, MemOperand(r6));
__ StoreMultipleP(r6, r9, MemOperand(sp, kPointerSize));
+
+ Register scrach = r8;
+
// Set up frame pointer for the frame to be pushed.
// Need to add kPointerSize, because sp has one extra
// frame already for the frame type being pushed later.
@@ -642,17 +644,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
masm->isolate());
__ Move(r7, js_entry_sp);
- __ LoadAndTestP(r8, MemOperand(r7));
+ __ LoadAndTestP(scrach, MemOperand(r7));
__ bne(&non_outermost_js, Label::kNear);
__ StoreP(fp, MemOperand(r7));
- __ Load(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Load(scrach, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont, Label::kNear);
__ bind(&non_outermost_js);
- __ Load(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ Load(scrach, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
- __ StoreP(ip, MemOperand(sp)); // frame-type
+ __ StoreP(scrach, MemOperand(sp)); // frame-type
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -668,10 +670,11 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- __ Move(ip, ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+ __ Move(scrach,
+ ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
+ masm->isolate()));
- __ StoreP(r2, MemOperand(ip));
+ __ StoreP(r2, MemOperand(scrach));
__ LoadRoot(r2, RootIndex::kException);
__ b(&exit, Label::kNear);
@@ -704,16 +707,16 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ pop(r7);
__ CmpP(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ bne(&non_outermost_js_2, Label::kNear);
- __ mov(r8, Operand::Zero());
+ __ mov(scrach, Operand::Zero());
__ Move(r7, js_entry_sp);
- __ StoreP(r8, MemOperand(r7));
+ __ StoreP(scrach, MemOperand(r7));
__ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(r5);
- __ Move(ip, ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, masm->isolate()));
- __ StoreP(r5, MemOperand(ip));
+ __ Move(scrach, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ StoreP(r5, MemOperand(scrach));
// Reset the stack to the callee saved registers.
__ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
@@ -949,13 +952,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee if needed, and caller)
- // -- r3 : new target (preserved for callee if needed, and caller)
- // -- r1 : target function (preserved for callee if needed, and caller)
+ // -- r5 : new target (preserved for callee if needed, and caller)
+ // -- r3 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(
- !AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, r3, r5, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -1140,6 +1141,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+  // and update invocation count. Otherwise, set up the stack frame.
+ __ LoadP(r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadLogicalHalfWordP(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
+ __ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE));
+ __ bne(&push_stack_frame);
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
@@ -1154,6 +1164,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
+ __ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1161,12 +1172,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ lghi(r1, Operand(0));
__ StoreHalfWord(r1,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset),
+ BytecodeArray::kOsrNestingLevelOffset),
r0);
// Load the initial bytecode offset.
@@ -1447,11 +1458,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ LoadlB(ip, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftP(ip, ip, Operand(kPointerSizeLog2));
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ LoadlB(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ShiftLeftP(scratch, scratch, Operand(kPointerSizeLog2));
__ LoadP(kJavaScriptCallCodeStartRegister,
- MemOperand(kInterpreterDispatchTableRegister, ip));
+ MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
}
@@ -1578,13 +1591,17 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ LoadP(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
- __ Pop(ip);
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
+ UseScratchRegisterScope temps(masm);
+ Register builtin = temps.Acquire();
+ __ Pop(builtin);
__ AddP(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(r0);
__ LoadRR(r14, r0);
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ __ LoadEntryFromBuiltinIndex(builtin);
+ __ Jump(builtin);
}
} // namespace
@@ -1745,13 +1762,14 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r2: actual number of arguments
// r3: callable
{
+ Register scratch = r5;
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ AddP(r4, sp, r4);
__ bind(&loop);
- __ LoadP(ip, MemOperand(r4, -kPointerSize));
- __ StoreP(ip, MemOperand(r4));
+ __ LoadP(scratch, MemOperand(r4, -kPointerSize));
+ __ StoreP(scratch, MemOperand(r4));
__ SubP(r4, Operand(kPointerSize));
__ CmpP(r4, sp);
__ bne(&loop);
@@ -1944,7 +1962,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Check for stack overflow.
Label stack_overflow;
- Generate_StackOverflowCheck(masm, r6, ip, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r6, scratch, &stack_overflow);
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -1955,13 +1973,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(ip, MemOperand(r4, kPointerSize));
+ __ LoadP(scratch, MemOperand(r4, kPointerSize));
__ la(r4, MemOperand(r4, kPointerSize));
- __ CompareRoot(ip, RootIndex::kTheHoleValue);
+ __ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip, Label::kNear);
- __ LoadRoot(ip, RootIndex::kUndefinedValue);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ bind(&skip);
- __ push(ip);
+ __ push(scratch);
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
__ AddP(r2, r2, r6);
@@ -2007,8 +2025,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpP(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ LoadP(scratch,
+ MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ CmpP(scratch,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ beq(&arguments_adaptor);
{
__ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -2042,9 +2062,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ AddP(r2, r2, r7);
__ bind(&loop);
{
- __ ShiftLeftP(ip, r7, Operand(kPointerSizeLog2));
- __ LoadP(ip, MemOperand(r6, ip));
- __ push(ip);
+ __ ShiftLeftP(scratch, r7, Operand(kPointerSizeLog2));
+ __ LoadP(scratch, MemOperand(r6, scratch));
+ __ push(scratch);
__ SubP(r7, r7, Operand(1));
__ CmpP(r7, Operand::Zero());
__ bne(&loop);
@@ -2189,10 +2209,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- r6 : the number of [[BoundArguments]]
// -----------------------------------
+ Register scratch = r8;
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ LoadRR(r8, sp); // preserve previous stack pointer
+ __ LoadRR(scratch, sp); // preserve previous stack pointer
__ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2));
__ SubP(sp, sp, r9);
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2201,7 +2222,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ CompareRoot(sp, RootIndex::kRealStackLimit);
__ bgt(&done); // Signed comparison.
// Restore the stack pointer.
- __ LoadRR(sp, r8);
+ __ LoadRR(sp, scratch);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2221,7 +2242,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ beq(&skip);
__ LoadRR(r1, r2);
__ bind(&loop);
- __ LoadP(r0, MemOperand(r8, r7));
+ __ LoadP(r0, MemOperand(scratch, r7));
__ StoreP(r0, MemOperand(sp, r7));
__ AddP(r7, r7, Operand(kPointerSize));
__ BranchOnCount(r1, &loop);
@@ -2257,9 +2278,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r3);
// Patch the receiver to [[BoundThis]].
- __ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
+ __ LoadP(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
- __ StoreP(ip, MemOperand(sp, r1));
+ __ StoreP(r5, MemOperand(sp, r1));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
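
[Editor's note] Generate_CallBoundFunctionImpl patches the receiver slot to [[BoundThis]] and then lets Generate_PushBoundArguments splice [[BoundArguments]] in front of the call-site arguments. A rough sketch of the resulting argument layout, with plain vectors standing in for the machine stack (names are illustrative only, not V8 data structures):

#include <string>
#include <vector>

struct BoundFunction {
  std::string bound_this;
  std::vector<std::string> bound_arguments;
};

// Assemble the effective receiver and argument list for a call to a bound
// function, mirroring what the builtins above arrange on the stack.
std::vector<std::string> AssembleCall(const BoundFunction& fn,
                                      const std::vector<std::string>& call_args) {
  std::vector<std::string> frame;
  frame.push_back(fn.bound_this);  // receiver patched to [[BoundThis]]
  // [[BoundArguments]] come first, then the arguments from the call site.
  frame.insert(frame.end(), fn.bound_arguments.begin(), fn.bound_arguments.end());
  frame.insert(frame.end(), call_args.begin(), call_args.end());
  return frame;
}

int main() {
  BoundFunction bf{"thisObj", {"a", "b"}};
  std::vector<std::string> frame = AssembleCall(bf, {"c"});
  return static_cast<int>(frame.size()) - 4;  // receiver + a, b, c -> 0
}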
@@ -2749,7 +2770,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ CompareRoot(r1, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ beq(&okay, Label::kNear);
- __ stop("Unexpected pending exception");
+ __ stop();
__ bind(&okay);
}
@@ -3000,13 +3021,22 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ LoadlB(scratch, MemOperand(scratch, 0));
__ CmpP(scratch, Operand::Zero());
- Label profiler_disabled;
- Label end_profiler_check;
- __ beq(&profiler_disabled, Label::kNear);
- __ Move(scratch, thunk_ref);
- __ b(&end_profiler_check, Label::kNear);
- __ bind(&profiler_disabled);
- __ LoadRR(scratch, function_address);
+ Label profiler_enabled, end_profiler_check;
+ __ bne(&profiler_enabled, Label::kNear);
+ __ Move(scratch, ExternalReference::address_of_runtime_stats_flag());
+ __ LoadlW(scratch, MemOperand(scratch, 0));
+ __ CmpP(scratch, Operand::Zero());
+ __ bne(&profiler_enabled, Label::kNear);
+ {
+ // Call the api function directly.
+ __ LoadRR(scratch, function_address);
+ __ b(&end_profiler_check, Label::kNear);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ Move(scratch, thunk_ref);
+ }
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
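
[Editor's note] With this change the builtin calls the C++ callback directly unless either the is-profiling flag or the runtime-call-stats flag is set, in which case it routes through the profiling thunk that receives the real callback address as an extra parameter. A hedged sketch of that dispatch decision in plain C++ (the flags and function types are stand-ins, not V8's actual ExternalReference plumbing):

#include <cstdio>

using ApiCallback = void (*)();
using ProfilingThunk = void (*)(ApiCallback target);

void MyCallback() { std::puts("callback"); }
void MyThunk(ApiCallback target) {
  std::puts("profiler wrapper");
  target();
}

// Mirrors the two-flag check emitted above: either flag being non-zero
// selects the thunk; otherwise the callback is invoked directly.
void CallApiFunction(bool is_profiling, bool runtime_stats_enabled,
                     ApiCallback callback, ProfilingThunk thunk) {
  if (is_profiling || runtime_stats_enabled) {
    thunk(callback);  // "profiler_enabled" path
  } else {
    callback();       // direct call
  }
}

int main() {
  CallApiFunction(false, false, MyCallback, MyThunk);
  CallApiFunction(true, false, MyCallback, MyThunk);
}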
@@ -3304,7 +3334,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// Unused.
- __ stop(0);
+ __ stop();
}
#undef __
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index e3403c601d..7188eb04a8 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -157,10 +157,7 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
- SegmentSize segment_size = isolate->serializer_enabled()
- ? SegmentSize::kLarge
- : SegmentSize::kDefault;
- Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
+ Zone zone(isolate->allocator(), ZONE_NAME);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
compiler::CodeAssemblerState state(
@@ -181,10 +178,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
- SegmentSize segment_size = isolate->serializer_enabled()
- ? SegmentSize::kLarge
- : SegmentSize::kDefault;
- Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
+ Zone zone(isolate->allocator(), ZONE_NAME);
// The interface descriptor with given key must be initialized at this point
// and this construction just queries the details from the descriptors table.
CallInterfaceDescriptor descriptor(interface_descriptor);
@@ -232,9 +226,9 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
- HeapIterator iterator(isolate->heap());
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(isolate->heap());
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (!obj.IsCode()) continue;
Code code = Code::cast(obj);
bool flush_icache = false;
@@ -282,10 +276,6 @@ Code GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
} // namespace
-#ifdef _MSC_VER
-#pragma optimize( "", off )
-#endif
-
// static
void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
Builtins* builtins = isolate->builtins();
@@ -363,10 +353,5 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
builtins->MarkInitialized();
}
-#ifdef _MSC_VER
-#pragma optimize( "", on )
-#endif
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/string-endswith.tq b/deps/v8/src/builtins/string-endswith.tq
index 16405d4c12..8b9fe84dfb 100644
--- a/deps/v8/src/builtins/string-endswith.tq
+++ b/deps/v8/src/builtins/string-endswith.tq
@@ -28,12 +28,13 @@ namespace string {
// https://tc39.github.io/ecma262/#sec-string.prototype.endswith
transitioning javascript builtin StringPrototypeEndsWith(
- context: Context, receiver: Object, ...arguments): Boolean {
+ js-implicit context: Context, receiver: Object)(...arguments): Boolean {
const searchString: Object = arguments[0];
const endPosition: Object = arguments[1];
+ const kBuiltinName: constexpr string = 'String.prototype.endsWith';
// 1. Let O be ? RequireObjectCoercible(this value).
- const object: Object = RequireObjectCoercible(receiver);
+ const object: Object = RequireObjectCoercible(receiver, kBuiltinName);
// 2. Let S be ? ToString(O).
const string: String = ToString_Inline(context, object);
@@ -41,7 +42,7 @@ namespace string {
// 3. Let isRegExp be ? IsRegExp(searchString).
// 4. If isRegExp is true, throw a TypeError exception.
if (IsRegExp(searchString)) {
- ThrowTypeError(kFirstArgumentNotRegExp, 'String.prototype.endsWith');
+ ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName);
}
// 5. Let searchStr be ? ToString(searchString).
@@ -63,7 +64,7 @@ namespace string {
const searchLength: Smi = searchStr.length_smi;
// 10. Let start be end - searchLength.
- let start = end - searchLength;
+ const start = end - searchLength;
// 11. If start is less than 0, return false.
if (start < 0) return False;
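
[Editor's note] The index arithmetic in the spec steps above boils down to clamping the end position to the string length and checking that the search string fits before it. A small self-contained C++ sketch of those steps (a plain re-implementation for illustration, with byte-wise comparison standing in for code-unit comparison; not the Torque code path):

#include <algorithm>
#include <cassert>
#include <string>

bool EndsWith(const std::string& s, const std::string& search,
              size_t end_position) {
  size_t end = std::min(end_position, s.size());  // steps 7-8: clamp end to length
  if (search.size() > end) return false;          // steps 10-11: start would be < 0
  size_t start = end - search.size();             // step 10: start = end - searchLength
  return s.compare(start, search.size(), search) == 0;  // compare trailing units
}

int main() {
  assert(EndsWith("hello", "lo", 5));
  assert(!EndsWith("hello", "he", 5));
  assert(EndsWith("hello", "he", 2));  // explicit endPosition argument
}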
diff --git a/deps/v8/src/builtins/string-html.tq b/deps/v8/src/builtins/string-html.tq
index a2b1625206..80b5f77887 100644
--- a/deps/v8/src/builtins/string-html.tq
+++ b/deps/v8/src/builtins/string-html.tq
@@ -22,22 +22,23 @@ namespace string_html {
// https://tc39.github.io/ecma262/#sec-string.prototype.anchor
transitioning javascript builtin StringPrototypeAnchor(
- context: Context, receiver: Object, ...arguments): String {
+ js-implicit context: Context, receiver: Object)(...arguments): String {
return CreateHTML(
receiver, 'String.prototype.anchor', 'a', 'name', arguments[0]);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.big
transitioning javascript builtin
- StringPrototypeBig(context: Context, receiver: Object, ...arguments): String {
+ StringPrototypeBig(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.big', 'big', kEmptyString, kEmptyString);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.blink
transitioning javascript builtin
- StringPrototypeBlink(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeBlink(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.blink', 'blink', kEmptyString,
kEmptyString);
@@ -45,56 +46,56 @@ namespace string_html {
// https://tc39.github.io/ecma262/#sec-string.prototype.bold
transitioning javascript builtin
- StringPrototypeBold(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeBold(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.bold', 'b', kEmptyString, kEmptyString);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.fontcolor
transitioning javascript builtin
- StringPrototypeFontcolor(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeFontcolor(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.fontcolor', 'font', 'color', arguments[0]);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.fontsize
transitioning javascript builtin
- StringPrototypeFontsize(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeFontsize(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.fontsize', 'font', 'size', arguments[0]);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.fixed
transitioning javascript builtin
- StringPrototypeFixed(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeFixed(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.fixed', 'tt', kEmptyString, kEmptyString);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.italics
transitioning javascript builtin
- StringPrototypeItalics(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeItalics(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.italics', 'i', kEmptyString, kEmptyString);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.link
transitioning javascript builtin
- StringPrototypeLink(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeLink(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.link', 'a', 'href', arguments[0]);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.small
transitioning javascript builtin
- StringPrototypeSmall(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeSmall(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.small', 'small', kEmptyString,
kEmptyString);
@@ -102,8 +103,8 @@ namespace string_html {
// https://tc39.github.io/ecma262/#sec-string.prototype.strike
transitioning javascript builtin
- StringPrototypeStrike(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeStrike(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.strike', 'strike', kEmptyString,
kEmptyString);
@@ -111,14 +112,16 @@ namespace string_html {
// https://tc39.github.io/ecma262/#sec-string.prototype.sub
transitioning javascript builtin
- StringPrototypeSub(context: Context, receiver: Object, ...arguments): String {
+ StringPrototypeSub(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.sub', 'sub', kEmptyString, kEmptyString);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.sup
transitioning javascript builtin
- StringPrototypeSup(context: Context, receiver: Object, ...arguments): String {
+ StringPrototypeSup(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.sup', 'sup', kEmptyString, kEmptyString);
}
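
[Editor's note] All of these wrappers funnel into a shared CreateHTML helper that surrounds the string with a tag and, for some methods, an attribute whose value has double quotes escaped. A minimal sketch of that Annex B algorithm in C++ (the helper name matches the Torque macro, everything else is illustrative):

#include <iostream>
#include <string>

// Build "<tag attr=\"value\">body</tag>", escaping '"' in the attribute value
// as "&quot;", as String.prototype.anchor/link/fontcolor/... require.
std::string CreateHTML(const std::string& body, const std::string& tag,
                       const std::string& attr, const std::string& value) {
  std::string out = "<" + tag;
  if (!attr.empty()) {
    std::string escaped;
    for (char c : value)
      escaped += (c == '"') ? std::string("&quot;") : std::string(1, c);
    out += " " + attr + "=\"" + escaped + "\"";
  }
  out += ">" + body + "</" + tag + ">";
  return out;
}

int main() {
  // Roughly what '"x"'.anchor('a"b') evaluates to in JavaScript.
  std::cout << CreateHTML("\"x\"", "a", "name", "a\"b") << "\n";
  // -> <a name="a&quot;b">"x"</a>
}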
diff --git a/deps/v8/src/builtins/string-iterator.tq b/deps/v8/src/builtins/string-iterator.tq
index f5c6099c25..5b8f864661 100644
--- a/deps/v8/src/builtins/string-iterator.tq
+++ b/deps/v8/src/builtins/string-iterator.tq
@@ -17,7 +17,7 @@ namespace string_iterator {
// ES6 #sec-string.prototype-@@iterator
transitioning javascript builtin StringPrototypeIterator(
- implicit context: Context)(receiver: Object): JSStringIterator {
+ js-implicit context: Context)(receiver: Object): JSStringIterator {
const name: String =
ToThisString(receiver, 'String.prototype[Symbol.iterator]');
const index: Smi = 0;
@@ -26,7 +26,7 @@ namespace string_iterator {
// ES6 #sec-%stringiteratorprototype%.next
transitioning javascript builtin StringIteratorPrototypeNext(
- implicit context: Context)(receiver: Object): JSIteratorResult {
+ js-implicit context: Context)(receiver: Object): JSObject {
const iterator = Cast<JSStringIterator>(receiver) otherwise ThrowTypeError(
kIncompatibleMethodReceiver, 'String Iterator.prototype.next',
receiver);
@@ -34,13 +34,13 @@ namespace string_iterator {
const position: intptr = SmiUntag(iterator.next_index);
const length: intptr = string.length_intptr;
if (position >= length) {
- return NewJSIteratorResult(Undefined, True);
+ return AllocateJSIteratorResult(Undefined, True);
}
// Move to next codepoint.
const encoding = UTF16;
const ch = string::LoadSurrogatePairAt(string, length, position, encoding);
- const value: String = string::StringFromSingleCodePoint(ch, encoding);
+ const value: String = string::StringFromSingleUTF16EncodedCodePoint(ch);
iterator.next_index = SmiTag(position + value.length_intptr);
- return NewJSIteratorResult(value, False);
+ return AllocateJSIteratorResult(value, False);
}
}
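
[Editor's note] StringIteratorPrototypeNext advances by one code point at a time: LoadSurrogatePairAt reads one or two UTF-16 code units, and next_index moves by the length of the resulting string. A self-contained C++ sketch of the same surrogate-pair walk over a std::u16string (illustrative only):

#include <cstdio>
#include <string>

// Decode the code point starting at |pos| and report how many UTF-16 code
// units it occupies (1, or 2 for a surrogate pair), like LoadSurrogatePairAt.
char32_t LoadCodePointAt(const std::u16string& s, size_t pos, size_t* units) {
  char16_t lead = s[pos];
  if (lead >= 0xD800 && lead <= 0xDBFF && pos + 1 < s.size()) {
    char16_t trail = s[pos + 1];
    if (trail >= 0xDC00 && trail <= 0xDFFF) {
      *units = 2;
      return 0x10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00);
    }
  }
  *units = 1;  // unpaired surrogates fall through and are returned as-is
  return lead;
}

int main() {
  std::u16string s = u"a\U0001F600b";  // 'a', an emoji (surrogate pair), 'b'
  for (size_t i = 0; i < s.size();) {
    size_t units = 0;
    char32_t cp = LoadCodePointAt(s, i, &units);
    std::printf("U+%04X (%zu unit(s))\n", static_cast<unsigned>(cp), units);
    i += units;  // same advance as the iterator.next_index update above
  }
}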
diff --git a/deps/v8/src/builtins/string-repeat.tq b/deps/v8/src/builtins/string-repeat.tq
index f2590011ea..0d9d4ee498 100644
--- a/deps/v8/src/builtins/string-repeat.tq
+++ b/deps/v8/src/builtins/string-repeat.tq
@@ -28,7 +28,7 @@ namespace string_repeat {
// https://tc39.github.io/ecma262/#sec-string.prototype.repeat
transitioning javascript builtin StringPrototypeRepeat(
- context: Context, receiver: Object, count: Object): String {
+ js-implicit context: Context, receiver: Object)(count: Object): String {
// 1. Let O be ? RequireObjectCoercible(this value).
// 2. Let S be ? ToString(O).
const s: String = ToThisString(receiver, kBuiltinName);
diff --git a/deps/v8/src/builtins/string-slice.tq b/deps/v8/src/builtins/string-slice.tq
index 41eb38b0ad..b066fb7669 100644
--- a/deps/v8/src/builtins/string-slice.tq
+++ b/deps/v8/src/builtins/string-slice.tq
@@ -9,7 +9,7 @@ namespace string_slice {
// ES6 #sec-string.prototype.slice ( start, end )
// https://tc39.github.io/ecma262/#sec-string.prototype.slice
transitioning javascript builtin StringPrototypeSlice(
- implicit context: Context)(receiver: Object, ...arguments): String {
+ js-implicit context: Context, receiver: Object)(...arguments): String {
// 1. Let O be ? RequireObjectCoercible(this value).
// 2. Let S be ? ToString(O).
const string: String = ToThisString(receiver, 'String.prototype.slice');
diff --git a/deps/v8/src/builtins/string-startswith.tq b/deps/v8/src/builtins/string-startswith.tq
index 1f885a2afd..b03e67ecf5 100644
--- a/deps/v8/src/builtins/string-startswith.tq
+++ b/deps/v8/src/builtins/string-startswith.tq
@@ -8,23 +8,15 @@ namespace string {
extern macro RegExpBuiltinsAssembler::IsRegExp(implicit context:
Context)(Object): bool;
- // TODO(ryzokuken): Add RequireObjectCoercible to base.tq and update callsites
- macro RequireObjectCoercible(implicit context: Context)(argument: Object):
- Object {
- if (IsNullOrUndefined(argument)) {
- ThrowTypeError(kCalledOnNullOrUndefined, 'String.prototype.startsWith');
- }
- return argument;
- }
-
// https://tc39.github.io/ecma262/#sec-string.prototype.startswith
transitioning javascript builtin StringPrototypeStartsWith(
- context: Context, receiver: Object, ...arguments): Boolean {
+ js-implicit context: Context, receiver: Object)(...arguments): Boolean {
const searchString: Object = arguments[0];
const position: Object = arguments[1];
+ const kBuiltinName: constexpr string = 'String.prototype.startsWith';
// 1. Let O be ? RequireObjectCoercible(this value).
- const object: Object = RequireObjectCoercible(receiver);
+ const object: Object = RequireObjectCoercible(receiver, kBuiltinName);
// 2. Let S be ? ToString(O).
const string: String = ToString_Inline(context, object);
@@ -32,7 +24,7 @@ namespace string {
// 3. Let isRegExp be ? IsRegExp(searchString).
// 4. If isRegExp is true, throw a TypeError exception.
if (IsRegExp(searchString)) {
- ThrowTypeError(kFirstArgumentNotRegExp, 'String.prototype.startsWith');
+ ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName);
}
// 5. Let searchStr be ? ToString(searchString).
diff --git a/deps/v8/src/builtins/string-substring.tq b/deps/v8/src/builtins/string-substring.tq
index f322eeed06..1fafb8af43 100644
--- a/deps/v8/src/builtins/string-substring.tq
+++ b/deps/v8/src/builtins/string-substring.tq
@@ -28,7 +28,7 @@ namespace string_substring {
// ES6 #sec-string.prototype.substring
transitioning javascript builtin StringPrototypeSubstring(
- implicit context: Context)(receiver: Object, ...arguments): String {
+ js-implicit context: Context, receiver: Object)(...arguments): String {
// Check that {receiver} is coercible to Object and convert it to a String.
const string: String = ToThisString(receiver, 'String.prototype.substring');
const length = string.length_smi;
diff --git a/deps/v8/src/builtins/string.tq b/deps/v8/src/builtins/string.tq
index 1e5a74eb49..dbcc5799e1 100644
--- a/deps/v8/src/builtins/string.tq
+++ b/deps/v8/src/builtins/string.tq
@@ -7,20 +7,21 @@
namespace string {
// ES6 #sec-string.prototype.tostring
transitioning javascript builtin
- StringPrototypeToString(implicit context: Context)(receiver: Object): Object {
+ StringPrototypeToString(js-implicit context: Context)(receiver: Object):
+ Object {
return ToThisValue(receiver, kString, 'String.prototype.toString');
}
// ES6 #sec-string.prototype.valueof
transitioning javascript builtin
- StringPrototypeValueOf(implicit context: Context)(receiver: Object): Object {
+ StringPrototypeValueOf(js-implicit context: Context)(receiver: Object):
+ Object {
return ToThisValue(receiver, kString, 'String.prototype.valueOf');
}
extern macro StringBuiltinsAssembler::LoadSurrogatePairAt(
String, intptr, intptr, constexpr UnicodeEncoding): int32;
- extern macro StringFromSingleCodePoint(int32, constexpr UnicodeEncoding):
- String;
+ extern macro StringFromSingleUTF16EncodedCodePoint(int32): String;
// This function assumes StringPrimitiveWithNoCustomIteration is true.
transitioning builtin StringToList(implicit context: Context)(string: String):
@@ -38,7 +39,7 @@ namespace string {
let i: intptr = 0;
while (i < stringLength) {
const ch: int32 = LoadSurrogatePairAt(string, stringLength, i, encoding);
- const value: String = StringFromSingleCodePoint(ch, encoding);
+ const value: String = StringFromSingleUTF16EncodedCodePoint(ch);
elements[arrayLength] = value;
// Increment and continue the loop.
i = i + value.length_intptr;
@@ -52,9 +53,9 @@ namespace string {
}
transitioning macro GenerateStringAt(implicit context: Context)(
- receiver: Object, position: Object, methodName: constexpr string):
- never labels IfInBounds(String, intptr, intptr),
- IfOutOfBounds {
+ receiver: Object, position: Object,
+ methodName: constexpr string): never labels
+ IfInBounds(String, intptr, intptr), IfOutOfBounds {
// Check that {receiver} is coercible to Object and convert it to a String.
const string: String = ToThisString(receiver, methodName);
// Convert the {position} to a Smi and check that it's in bounds of
@@ -70,12 +71,13 @@ namespace string {
// ES6 #sec-string.prototype.charat
transitioning javascript builtin StringPrototypeCharAt(
- implicit context: Context)(receiver: Object, position: Object): Object {
+ js-implicit context: Context,
+ receiver: Object)(position: Object): Object {
try {
GenerateStringAt(receiver, position, 'String.prototype.charAt')
otherwise IfInBounds, IfOutOfBounds;
}
- label IfInBounds(string: String, index: intptr, length: intptr) {
+ label IfInBounds(string: String, index: intptr, _length: intptr) {
const code: int32 = StringCharCodeAt(string, index);
return StringFromSingleCharCode(code);
}
@@ -86,12 +88,13 @@ namespace string {
// ES6 #sec-string.prototype.charcodeat
transitioning javascript builtin StringPrototypeCharCodeAt(
- implicit context: Context)(receiver: Object, position: Object): Object {
+ js-implicit context: Context,
+ receiver: Object)(position: Object): Object {
try {
GenerateStringAt(receiver, position, 'String.prototype.charCodeAt')
otherwise IfInBounds, IfOutOfBounds;
}
- label IfInBounds(string: String, index: intptr, length: intptr) {
+ label IfInBounds(string: String, index: intptr, _length: intptr) {
const code: int32 = StringCharCodeAt(string, index);
return Convert<Smi>(code);
}
@@ -102,7 +105,8 @@ namespace string {
// ES6 #sec-string.prototype.codepointat
transitioning javascript builtin StringPrototypeCodePointAt(
- implicit context: Context)(receiver: Object, position: Object): Object {
+ js-implicit context: Context,
+ receiver: Object)(position: Object): Object {
try {
GenerateStringAt(receiver, position, 'String.prototype.codePointAt')
otherwise IfInBounds, IfOutOfBounds;
@@ -121,7 +125,7 @@ namespace string {
// ES6 String.prototype.concat(...args)
// ES6 #sec-string.prototype.concat
transitioning javascript builtin StringPrototypeConcat(
- implicit context: Context)(receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// Check that {receiver} is coercible to Object and convert it to a String.
let string: String = ToThisString(receiver, 'String.prototype.concat');
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index a0d745b2f4..f6ab289e12 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -8,30 +8,77 @@ namespace typed_array_createtypedarray {
extern builtin IterableToListMayPreserveHoles(Context, Object, Callable):
JSArray;
- extern macro ConstructorBuiltinsAssembler::EmitFastNewObject(
- implicit context: Context)(JSFunction, JSReceiver): JSTypedArray;
extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
- implicit context: Context)(JSTypedArray, uintptr): JSArrayBuffer;
+ implicit context: Context)(uintptr): JSArrayBuffer;
+ extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray;
extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor(
implicit context: Context)(JSTypedArray): JSFunction;
extern macro TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(JSArrayBuffer):
bool;
- extern macro TypedArrayBuiltinsAssembler::SetupTypedArray(
- JSTypedArray, uintptr, uintptr, uintptr): void;
+ extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
+ JSTypedArray): void;
extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)(
Map, String): never;
extern runtime TypedArrayCopyElements(Context, JSTypedArray, Object, Number):
void;
+ transitioning macro AllocateTypedArray(implicit context: Context)(
+ isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer,
+ byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray {
+ let elements: ByteArray;
+ let externalPointer: RawPtr;
+ let basePointer: ByteArray | Smi;
+ if constexpr (isOnHeap) {
+ elements = AllocateByteArray(byteLength);
+ basePointer = elements;
+ externalPointer = PointerConstant(kExternalPointerForOnHeapArray);
+ } else {
+ basePointer = Convert<Smi>(0);
+
+ // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit
+ // platforms are self-limiting, because we can't allocate an array bigger
+ // than our 32-bit arithmetic range anyway. 64 bit platforms could
+ // theoretically have an offset up to 2^35 - 1.
+ const backingStore: RawPtr = buffer.backing_store;
+ externalPointer = backingStore + Convert<intptr>(byteOffset);
+
+ // Assert no overflow has occurred. Only assert if the mock array buffer
+ // allocator is NOT used. When the mock array buffer is used, impossibly
+ // large allocations are allowed that would erroneously cause an overflow
+ // and this assertion to fail.
+ assert(
+ IsMockArrayBufferAllocatorFlag() ||
+ Convert<uintptr>(externalPointer) >= Convert<uintptr>(backingStore));
+
+ elements = kEmptyByteArray;
+ }
+
+ // We can't just build the new object with "new JSTypedArray" here because
+ // Torque doesn't know its full size including embedder fields, so use CSA
+ // for the allocation step.
+ const typedArray =
+ UnsafeCast<JSTypedArray>(AllocateFastOrSlowJSObjectFromMap(map));
+ typedArray.elements = elements;
+ typedArray.buffer = buffer;
+ typedArray.byte_offset = byteOffset;
+ typedArray.byte_length = byteLength;
+ typedArray.length = length;
+ typedArray.external_pointer = externalPointer;
+ typedArray.base_pointer = basePointer;
+ SetupTypedArrayEmbedderFields(typedArray);
+ return typedArray;
+ }
+
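
[Editor's note] The new AllocateTypedArray macro sets external_pointer and base_pointer so that an on-heap array keeps its data in a ByteArray owned by the object, while an off-heap array points directly into the JSArrayBuffer backing store at buffer.backing_store + byteOffset. A rough C++ sketch of the two layouts (pure illustration; the field names echo the Torque code but the types are not V8's):

#include <cstdint>
#include <vector>

// Illustrative stand-ins for the two typed-array layouts set up above.
struct OnHeapTypedArray {
  std::vector<uint8_t> elements;  // "ByteArray" owned by the object
  uint8_t* data_ptr() { return elements.data(); }
};

struct OffHeapTypedArray {
  uint8_t* backing_store;  // buffer.backing_store
  size_t byte_offset;
  // external_pointer = backing_store + byte_offset in the macro above.
  uint8_t* data_ptr() { return backing_store + byte_offset; }
};

int main() {
  OnHeapTypedArray on_heap{std::vector<uint8_t>(16)};
  std::vector<uint8_t> buffer(64);
  OffHeapTypedArray off_heap{buffer.data(), 8};
  // Both expose a data pointer; only its provenance differs.
  bool ok = on_heap.data_ptr() != nullptr &&
            off_heap.data_ptr() == buffer.data() + 8;
  return ok ? 0 : 1;
}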
transitioning macro TypedArrayInitialize(implicit context: Context)(
- initialize: constexpr bool, typedArray: JSTypedArray, length: PositiveSmi,
+ initialize: constexpr bool, map: Map, length: PositiveSmi,
elementsInfo: typed_array::TypedArrayElementsInfo,
- bufferConstructor: JSReceiver): uintptr {
+ bufferConstructor: JSReceiver): JSTypedArray {
const byteLength = elementsInfo.CalculateByteLength(length)
otherwise ThrowRangeError(kInvalidArrayBufferLength);
const byteLengthNum = Convert<Number>(byteLength);
const defaultConstructor = GetArrayBufferFunction();
+ const byteOffset: uintptr = 0;
try {
if (bufferConstructor != defaultConstructor) {
@@ -39,14 +86,21 @@ namespace typed_array_createtypedarray {
defaultConstructor, bufferConstructor, byteLengthNum));
}
- if (byteLength > V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP) goto AllocateOffHeap;
+ if (byteLength > kMaxTypedArrayInHeap) goto AllocateOffHeap;
+
+ const buffer = AllocateEmptyOnHeapBuffer(byteLength);
- AllocateEmptyOnHeapBuffer(typedArray, byteLength);
+ const isOnHeap: constexpr bool = true;
+ const typedArray = AllocateTypedArray(
+ isOnHeap, map, buffer, byteOffset, byteLength,
+ Convert<uintptr>(length));
if constexpr (initialize) {
const backingStore = typedArray.data_ptr;
typed_array::CallCMemset(backingStore, 0, byteLength);
}
+
+ return typedArray;
}
label AllocateOffHeap {
if constexpr (initialize) {
@@ -58,22 +112,18 @@ namespace typed_array_createtypedarray {
}
label AttachOffHeapBuffer(bufferObj: Object) {
const buffer = Cast<JSArrayBuffer>(bufferObj) otherwise unreachable;
- const byteOffset: uintptr = 0;
- typedArray.AttachOffHeapBuffer(buffer, byteOffset);
+ const isOnHeap: constexpr bool = false;
+ return AllocateTypedArray(
+ isOnHeap, map, buffer, byteOffset, byteLength,
+ Convert<uintptr>(length));
}
-
- const byteOffset: uintptr = 0;
- SetupTypedArray(
- typedArray, Convert<uintptr>(length), byteOffset, byteLength);
-
- return byteLength;
}
// 22.2.4.2 TypedArray ( length )
// ES #sec-typedarray-length
transitioning macro ConstructByLength(implicit context: Context)(
- typedArray: JSTypedArray, length: Object,
- elementsInfo: typed_array::TypedArrayElementsInfo): void {
+ map: Map, length: Object,
+ elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
const convertedLength: Number =
ToInteger_Inline(context, length, kTruncateMinusZero);
// The maximum length of a TypedArray is MaxSmi().
@@ -84,23 +134,22 @@ namespace typed_array_createtypedarray {
otherwise ThrowRangeError(kInvalidTypedArrayLength, length);
const defaultConstructor: Constructor = GetArrayBufferFunction();
const initialize: constexpr bool = true;
- TypedArrayInitialize(
- initialize, typedArray, positiveLength, elementsInfo,
- defaultConstructor);
+ return TypedArrayInitialize(
+ initialize, map, positiveLength, elementsInfo, defaultConstructor);
}
// 22.2.4.4 TypedArray ( object )
// ES #sec-typedarray-object
transitioning macro ConstructByArrayLike(implicit context: Context)(
- typedArray: JSTypedArray, arrayLike: HeapObject, initialLength: Object,
+ map: Map, arrayLike: HeapObject, initialLength: Object,
elementsInfo: typed_array::TypedArrayElementsInfo,
- bufferConstructor: JSReceiver): void {
+ bufferConstructor: JSReceiver): JSTypedArray {
// The caller has looked up length on arrayLike, which is observable.
const length: PositiveSmi = ToSmiLength(initialLength)
otherwise ThrowRangeError(kInvalidTypedArrayLength, initialLength);
const initialize: constexpr bool = false;
- const byteLength = TypedArrayInitialize(
- initialize, typedArray, length, elementsInfo, bufferConstructor);
+ const typedArray = TypedArrayInitialize(
+ initialize, map, length, elementsInfo, bufferConstructor);
try {
const src: JSTypedArray = Cast<JSTypedArray>(arrayLike) otherwise IfSlow;
@@ -112,6 +161,7 @@ namespace typed_array_createtypedarray {
goto IfSlow;
} else if (length > 0) {
+ const byteLength = typedArray.byte_length;
assert(byteLength <= kArrayBufferMaxByteLength);
typed_array::CallCMemcpy(typedArray.data_ptr, src.data_ptr, byteLength);
}
@@ -121,13 +171,13 @@ namespace typed_array_createtypedarray {
TypedArrayCopyElements(context, typedArray, arrayLike, length);
}
}
+ return typedArray;
}
// 22.2.4.4 TypedArray ( object )
// ES #sec-typedarray-object
transitioning macro ConstructByIterable(implicit context: Context)(
- typedArray: JSTypedArray, iterable: JSReceiver, iteratorFn: Callable,
- elementsInfo: typed_array::TypedArrayElementsInfo): never
+ iterable: JSReceiver, iteratorFn: Callable): never
labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
const array: JSArray =
IterableToListMayPreserveHoles(context, iterable, iteratorFn);
@@ -137,8 +187,7 @@ namespace typed_array_createtypedarray {
// 22.2.4.3 TypedArray ( typedArray )
// ES #sec-typedarray-typedarray
transitioning macro ConstructByTypedArray(implicit context: Context)(
- typedArray: JSTypedArray, srcTypedArray: JSTypedArray,
- elementsInfo: typed_array::TypedArrayElementsInfo): never
+ srcTypedArray: JSTypedArray): never
labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
let bufferConstructor: JSReceiver = GetArrayBufferFunction();
const srcBuffer: JSArrayBuffer = srcTypedArray.buffer;
@@ -161,8 +210,8 @@ namespace typed_array_createtypedarray {
// 22.2.4.5 TypedArray ( buffer, byteOffset, length )
// ES #sec-typedarray-buffer-byteoffset-length
transitioning macro ConstructByArrayBuffer(implicit context: Context)(
- typedArray: JSTypedArray, buffer: JSArrayBuffer, byteOffset: Object,
- length: Object, elementsInfo: typed_array::TypedArrayElementsInfo): void {
+ map: Map, buffer: JSArrayBuffer, byteOffset: Object, length: Object,
+ elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
try {
let offset: uintptr = 0;
if (byteOffset != Undefined) {
@@ -224,12 +273,13 @@ namespace typed_array_createtypedarray {
goto IfInvalidLength;
}
- SetupTypedArray(
- typedArray, Convert<uintptr>(newLength), offset, newByteLength);
- typedArray.AttachOffHeapBuffer(buffer, offset);
+ const isOnHeap: constexpr bool = false;
+ return AllocateTypedArray(
+ isOnHeap, map, buffer, offset, newByteLength,
+ Convert<uintptr>(newLength));
}
label IfInvalidAlignment(problemString: String) deferred {
- ThrowInvalidTypedArrayAlignment(typedArray.map, problemString);
+ ThrowInvalidTypedArrayAlignment(map, problemString);
}
label IfInvalidByteLength deferred {
ThrowRangeError(kInvalidArrayBufferLength);
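
[Editor's note] The deferred labels above (IfInvalidAlignment, IfInvalidByteLength, IfInvalidLength) throw for the error cases of constructing a view over an existing buffer; the checks themselves live earlier in the macro and follow ES #sec-typedarray-buffer-byteoffset-length. A compact, approximate sketch of that validation, re-derived from the spec rather than copied from the Torque code:

#include <cstddef>
#include <cstdint>
#include <optional>

struct ViewLayout { size_t byte_offset; size_t byte_length; size_t length; };

// Validate (byteOffset, length) against a buffer of |buffer_byte_length| bytes
// for an element size of |element_size|. Returns nullopt for the conditions
// the deferred labels above throw RangeError for (ignoring overflow handling).
std::optional<ViewLayout> ValidateView(size_t buffer_byte_length,
                                       size_t byte_offset,
                                       std::optional<size_t> length,
                                       size_t element_size) {
  if (byte_offset % element_size != 0) return std::nullopt;  // misaligned offset
  size_t new_byte_length;
  if (!length.has_value()) {
    if (buffer_byte_length % element_size != 0) return std::nullopt;
    if (byte_offset > buffer_byte_length) return std::nullopt;
    new_byte_length = buffer_byte_length - byte_offset;
  } else {
    new_byte_length = *length * element_size;
    if (byte_offset + new_byte_length > buffer_byte_length) return std::nullopt;
  }
  return ViewLayout{byte_offset, new_byte_length, new_byte_length / element_size};
}

int main() {
  // new Int32Array(buffer /* 64 bytes */, 8, 4) -> byteOffset 8, byteLength 16.
  auto ok = ValidateView(64, 8, size_t{4}, sizeof(int32_t));
  auto bad = ValidateView(64, 6, size_t{4}, sizeof(int32_t));  // misaligned
  return (ok && !bad) ? 0 : 1;
}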
@@ -242,16 +292,15 @@ namespace typed_array_createtypedarray {
}
}
- transitioning macro ConstructByJSReceiver(implicit context: Context)(
- array: JSTypedArray, obj: JSReceiver,
- elementsInfo: typed_array::TypedArrayElementsInfo): never
+ transitioning macro ConstructByJSReceiver(implicit context:
+ Context)(obj: JSReceiver): never
labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
try {
const iteratorMethod: Object =
GetIteratorMethod(obj) otherwise IfIteratorUndefined;
const iteratorFn: Callable = Cast<Callable>(iteratorMethod)
otherwise ThrowTypeError(kIteratorSymbolNonCallable);
- ConstructByIterable(array, obj, iteratorFn, elementsInfo)
+ ConstructByIterable(obj, iteratorFn)
otherwise IfConstructByArrayLike;
}
label IfIteratorUndefined {
@@ -273,22 +322,12 @@ namespace typed_array_createtypedarray {
assert(IsConstructor(target));
// 4. Let O be ? AllocateTypedArray(constructorName, NewTarget,
// "%TypedArrayPrototype%").
- const array: JSTypedArray = EmitFastNewObject(target, newTarget);
- // We need to set the byte_offset / byte_length to some sane values
- // to keep the heap verifier happy.
- // TODO(bmeurer, v8:4153): Fix this initialization to not use
- // EmitFastNewObject, which causes the problem, since it puts
- // Undefined into all slots of the object even though that
- // doesn't make any sense for these fields.
- array.byte_offset = 0;
- array.byte_length = 0;
- array.length = 0;
- array.base_pointer = Convert<Smi>(0);
+ const map = GetDerivedMap(target, newTarget);
// 5. Let elementSize be the Number value of the Element Size value in Table
// 56 for constructorName.
const elementsInfo: typed_array::TypedArrayElementsInfo =
- typed_array::GetTypedArrayElementsInfo(array);
+ typed_array::GetTypedArrayElementsInfo(map);
try {
typeswitch (arg1) {
@@ -296,15 +335,13 @@ namespace typed_array_createtypedarray {
goto IfConstructByLength(length);
}
case (buffer: JSArrayBuffer): {
- ConstructByArrayBuffer(array, buffer, arg2, arg3, elementsInfo);
+ return ConstructByArrayBuffer(map, buffer, arg2, arg3, elementsInfo);
}
case (typedArray: JSTypedArray): {
- ConstructByTypedArray(array, typedArray, elementsInfo)
- otherwise IfConstructByArrayLike;
+ ConstructByTypedArray(typedArray) otherwise IfConstructByArrayLike;
}
case (obj: JSReceiver): {
- ConstructByJSReceiver(array, obj, elementsInfo)
- otherwise IfConstructByArrayLike;
+ ConstructByJSReceiver(obj) otherwise IfConstructByArrayLike;
}
// The first argument was a number or fell through and is treated as
// a number. https://tc39.github.io/ecma262/#sec-typedarray-length
@@ -314,14 +351,13 @@ namespace typed_array_createtypedarray {
}
}
label IfConstructByLength(length: Object) {
- ConstructByLength(array, length, elementsInfo);
+ return ConstructByLength(map, length, elementsInfo);
}
label IfConstructByArrayLike(
arrayLike: HeapObject, length: Object, bufferConstructor: JSReceiver) {
- ConstructByArrayLike(
- array, arrayLike, length, elementsInfo, bufferConstructor);
+ return ConstructByArrayLike(
+ map, arrayLike, length, elementsInfo, bufferConstructor);
}
- return array;
}
transitioning macro TypedArraySpeciesCreate(implicit context: Context)(
diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq
index 4f8804880e..221814cb79 100644
--- a/deps/v8/src/builtins/typed-array-every.tq
+++ b/deps/v8/src/builtins/typed-array-every.tq
@@ -29,8 +29,8 @@ namespace typed_array_every {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
transitioning javascript builtin
- TypedArrayPrototypeEvery(implicit context: Context)(
- receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeEvery(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = thisArg
try {
diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq
index 9407c3a7af..3937699c73 100644
--- a/deps/v8/src/builtins/typed-array-filter.tq
+++ b/deps/v8/src/builtins/typed-array-filter.tq
@@ -10,7 +10,7 @@ namespace typed_array_filter {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.filter
transitioning javascript builtin TypedArrayPrototypeFilter(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// arguments[0] = callback
// arguments[1] = thisArg
try {
diff --git a/deps/v8/src/builtins/typed-array-find.tq b/deps/v8/src/builtins/typed-array-find.tq
index 3c331eb3bb..be1943ccf4 100644
--- a/deps/v8/src/builtins/typed-array-find.tq
+++ b/deps/v8/src/builtins/typed-array-find.tq
@@ -29,8 +29,8 @@ namespace typed_array_find {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find
transitioning javascript builtin
- TypedArrayPrototypeFind(implicit context:
- Context)(receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeFind(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = thisArg
try {
diff --git a/deps/v8/src/builtins/typed-array-findindex.tq b/deps/v8/src/builtins/typed-array-findindex.tq
index 05f112d0d5..a5ee7897d3 100644
--- a/deps/v8/src/builtins/typed-array-findindex.tq
+++ b/deps/v8/src/builtins/typed-array-findindex.tq
@@ -29,8 +29,8 @@ namespace typed_array_findindex {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.findIndex
transitioning javascript builtin
- TypedArrayPrototypeFindIndex(implicit context: Context)(
- receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = thisArg.
try {
diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq
index dbf1a121da..656a22e07d 100644
--- a/deps/v8/src/builtins/typed-array-foreach.tq
+++ b/deps/v8/src/builtins/typed-array-foreach.tq
@@ -25,8 +25,8 @@ namespace typed_array_foreach {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
transitioning javascript builtin
- TypedArrayPrototypeForEach(implicit context: Context)(
- receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeForEach(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = this_arg.
diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq
index 7af918a07b..d69dc9a98d 100644
--- a/deps/v8/src/builtins/typed-array-reduce.tq
+++ b/deps/v8/src/builtins/typed-array-reduce.tq
@@ -19,7 +19,7 @@ namespace typed_array_reduce {
// BUG(4895): We should throw on detached buffers rather than simply exit.
witness.Recheck() otherwise break;
const value: Object = witness.Load(k);
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
accumulator = value;
} else {
accumulator = Call(
@@ -27,7 +27,7 @@ namespace typed_array_reduce {
witness.GetStable());
}
}
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, kBuiltinName);
}
return accumulator;
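
[Editor's note] The reduce loop uses TheHole as an in-band marker for "no accumulator yet": the first loaded element seeds the accumulator, and if the marker survives the loop there was neither an initial value nor any element, so a TypeError is thrown. The same shape expressed with std::optional in C++ (a sketch of the algorithm, not of Torque semantics, and without the detached-buffer recheck):

#include <functional>
#include <optional>
#include <stdexcept>
#include <vector>

// Reduce |elements| with |callback|; |initial| plays the role of the
// TheHole-versus-value distinction in the builtin above.
double Reduce(const std::vector<double>& elements,
              const std::function<double(double, double)>& callback,
              std::optional<double> initial) {
  std::optional<double> accumulator = initial;
  for (double value : elements) {
    if (!accumulator.has_value()) {
      accumulator = value;  // first element seeds the accumulator
    } else {
      accumulator = callback(*accumulator, value);
    }
  }
  if (!accumulator.has_value()) {
    throw std::invalid_argument("Reduce of empty array with no initial value");
  }
  return *accumulator;
}

int main() {
  double sum = Reduce({1, 2, 3},
                      [](double a, double b) { return a + b; }, std::nullopt);
  return sum == 6 ? 0 : 1;
}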
@@ -35,8 +35,8 @@ namespace typed_array_reduce {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduce
transitioning javascript builtin
- TypedArrayPrototypeReduce(implicit context: Context)(
- receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeReduce(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = initialValue.
try {
@@ -45,7 +45,7 @@ namespace typed_array_reduce {
const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const initialValue = arguments.length >= 2 ? arguments[1] : Hole;
+ const initialValue = arguments.length >= 2 ? arguments[1] : TheHole;
return ReduceAllElements(uarray, callbackfn, initialValue);
}
label NotCallable deferred {
diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq
index 59ce7ff55b..99a84401ed 100644
--- a/deps/v8/src/builtins/typed-array-reduceright.tq
+++ b/deps/v8/src/builtins/typed-array-reduceright.tq
@@ -19,7 +19,7 @@ namespace typed_array_reduceright {
// BUG(4895): We should throw on detached buffers rather than simply exit.
witness.Recheck() otherwise break;
const value: Object = witness.Load(k);
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
accumulator = value;
} else {
accumulator = Call(
@@ -27,7 +27,7 @@ namespace typed_array_reduceright {
witness.GetStable());
}
}
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, kBuiltinName);
}
return accumulator;
@@ -35,8 +35,8 @@ namespace typed_array_reduceright {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright
transitioning javascript builtin
- TypedArrayPrototypeReduceRight(implicit context: Context)(
- receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeReduceRight(
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// arguments[0] = callback
// arguments[1] = initialValue.
try {
@@ -45,7 +45,7 @@ namespace typed_array_reduceright {
const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const initialValue = arguments.length >= 2 ? arguments[1] : Hole;
+ const initialValue = arguments.length >= 2 ? arguments[1] : TheHole;
return ReduceRightAllElements(uarray, callbackfn, initialValue);
}
diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq
index f45654b71e..c0087ae1be 100644
--- a/deps/v8/src/builtins/typed-array-slice.tq
+++ b/deps/v8/src/builtins/typed-array-slice.tq
@@ -53,7 +53,7 @@ namespace typed_array_slice {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.slice
transitioning javascript builtin TypedArrayPrototypeSlice(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// arguments[0] = start
// arguments[1] = end
diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq
index 991cad6b1b..7056650fba 100644
--- a/deps/v8/src/builtins/typed-array-some.tq
+++ b/deps/v8/src/builtins/typed-array-some.tq
@@ -29,8 +29,8 @@ namespace typed_array_some {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.some
transitioning javascript builtin
- TypedArrayPrototypeSome(implicit context:
- Context)(receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeSome(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = thisArg.
try {
diff --git a/deps/v8/src/builtins/typed-array-subarray.tq b/deps/v8/src/builtins/typed-array-subarray.tq
index 54b945f44e..4f98123f82 100644
--- a/deps/v8/src/builtins/typed-array-subarray.tq
+++ b/deps/v8/src/builtins/typed-array-subarray.tq
@@ -5,7 +5,8 @@
namespace typed_array_subarray {
// ES %TypedArray%.prototype.subarray
transitioning javascript builtin TypedArrayPrototypeSubArray(
- context: Context, receiver: Object, ...arguments): JSTypedArray {
+ js-implicit context: Context,
+ receiver: Object)(...arguments): JSTypedArray {
const methodName: constexpr string = '%TypedArray%.prototype.subarray';
// 1. Let O be the this value.
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 8f923947f1..d03c1a0be9 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -65,29 +65,18 @@ namespace typed_array {
implicit context: Context)(JSTypedArray): JSArrayBuffer;
extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
JSTypedArray): TypedArrayElementsInfo;
+ extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(Map):
+ TypedArrayElementsInfo;
extern macro TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
ElementsKind): bool;
extern macro LoadFixedTypedArrayElementAsTagged(
- RawPtr, Smi, constexpr ElementsKind, constexpr ParameterMode): Object;
+ RawPtr, Smi, constexpr ElementsKind): Numeric;
extern macro StoreJSTypedArrayElementFromTagged(
- Context, JSTypedArray, Smi, Object, constexpr ElementsKind,
- constexpr ParameterMode);
+ Context, JSTypedArray, Smi, Object, constexpr ElementsKind);
type LoadFn = builtin(Context, JSTypedArray, Smi) => Object;
type StoreFn = builtin(Context, JSTypedArray, Smi, Object) => Object;
  - // These UnsafeCast specializations are necessary because there is no
- // way to definitively test whether an Object is a Torque function
- // with a specific signature, and the default UnsafeCast implementation
- // would try to check this through an assert(Is<>), so the test
- // is bypassed in this specialization.
- UnsafeCast<LoadFn>(implicit context: Context)(o: Object): LoadFn {
- return %RawDownCast<LoadFn>(o);
- }
- UnsafeCast<StoreFn>(implicit context: Context)(o: Object): StoreFn {
- return %RawDownCast<StoreFn>(o);
- }
-
// AttachedJSTypedArray guards that the array's buffer is not detached.
transient type AttachedJSTypedArray extends JSTypedArray;
@@ -201,17 +190,16 @@ namespace typed_array {
}
builtin LoadFixedElement<T: type>(
- context: Context, array: JSTypedArray, index: Smi): Object {
+ _context: Context, array: JSTypedArray, index: Smi): Object {
return LoadFixedTypedArrayElementAsTagged(
- array.data_ptr, index, KindForArrayType<T>(), SMI_PARAMETERS);
+ array.data_ptr, index, KindForArrayType<T>());
}
builtin StoreFixedElement<T: type>(
context: Context, typedArray: JSTypedArray, index: Smi,
value: Object): Object {
StoreJSTypedArrayElementFromTagged(
- context, typedArray, index, value, KindForArrayType<T>(),
- SMI_PARAMETERS);
+ context, typedArray, index, value, KindForArrayType<T>());
return Undefined;
}
@@ -288,7 +276,8 @@ namespace typed_array {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort
transitioning javascript builtin TypedArrayPrototypeSort(
- context: Context, receiver: Object, ...arguments): JSTypedArray {
+ js-implicit context: Context,
+ receiver: Object)(...arguments): JSTypedArray {
// 1. If comparefn is not undefined and IsCallable(comparefn) is false,
// throw a TypeError exception.
const comparefnObj: Object =
@@ -322,7 +311,7 @@ namespace typed_array {
let loadfn: LoadFn;
let storefn: StoreFn;
- let elementsKind: ElementsKind = array.elements_kind;
+ const elementsKind: ElementsKind = array.elements_kind;
if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) {
if (elementsKind == INT32_ELEMENTS) {
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 5c09b3a8de..f15c8ba29f 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -1109,10 +1109,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // 8-bit fields next to each other, so we could just optimize by writing a
   // 16-bit value. These static asserts guard that our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset),
+ BytecodeArray::kOsrNestingLevelOffset),
Immediate(0));
// Load initial bytecode offset.
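
[Editor's note] The two STATIC_ASSERTs above let the trampoline clear both the OSR nesting level and the bytecode age with a single 16-bit store: the byte fields are adjacent and the value written is zero, so byte order does not matter. A small illustration of the trick (a plain struct, not the real BytecodeArray layout):

#include <cassert>
#include <cstdint>
#include <cstring>

// Two adjacent one-byte fields, mimicking osr_nesting_level / bytecode_age.
struct Header {
  uint8_t osr_nesting_level;
  uint8_t bytecode_age;
};

int main() {
  Header h{3, 7};
  // Because the fields are adjacent and the stored value is zero, one 16-bit
  // store clears both regardless of endianness -- the property the
  // STATIC_ASSERTs above guard.
  uint16_t zero = 0;
  std::memcpy(&h.osr_nesting_level, &zero, sizeof(zero));
  assert(h.osr_nesting_level == 0 && h.bytecode_age == 0);
  return h.osr_nesting_level + h.bytecode_age;
}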
@@ -1562,7 +1562,15 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kSystemPointerSize;
__ popq(Operand(rsp, offsetToPC));
__ Drop(offsetToPC / kSystemPointerSize);
- __ addq(Operand(rsp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
+
+ // Replace the builtin index Smi on the stack with the instruction start
+ // address of the builtin from the builtins table, and then Ret to this
+  // address.
+ __ movq(kScratchRegister, Operand(rsp, 0));
+ __ movq(kScratchRegister,
+ __ EntryFromBuiltinIndexAsOperand(kScratchRegister));
+ __ movq(Operand(rsp, 0), kScratchRegister);
+
__ Ret();
}
} // namespace
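
[Editor's note] Instead of adding Code::kHeaderSize to a Code object pointer, the helper now resolves the saved builtin index to an entry address in the builtins entry table, writes it over the stack slot, and returns into it. The lookup itself is just an indexed load; a plain C++ analogue using a function-pointer table (the table and names are illustrative, not V8's IsolateData layout):

#include <cstdio>

void BuiltinA() { std::puts("builtin A"); }
void BuiltinB() { std::puts("builtin B"); }

// Stand-in for the builtins entry table indexed by builtin index.
void (*const kBuiltinEntryTable[])() = {BuiltinA, BuiltinB};

// The caller leaves a builtin index behind; the continuation resolves it to an
// entry address and transfers control there (here an ordinary call, whereas
// the generated code rewrites the return slot and uses Ret).
void ContinueToBuiltin(int builtin_index) {
  kBuiltinEntryTable[builtin_index]();
}

int main() {
  ContinueToBuiltin(1);  // -> "builtin B"
}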
@@ -3002,21 +3010,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
__ addl(Operand(base_reg, kLevelOffset), Immediate(1));
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ Move(rax, ExternalReference::is_profiling_address(isolate));
__ cmpb(Operand(rax, 0), Immediate(0));
- __ j(zero, &profiler_disabled);
-
- // Third parameter is the address of the actual getter function.
- __ Move(thunk_last_arg, function_address);
- __ Move(rax, thunk_ref);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- // Call the api function!
- __ Move(rax, function_address);
-
+ __ j(not_zero, &profiler_enabled);
+ __ Move(rax, ExternalReference::address_of_runtime_stats_flag());
+ __ cmpl(Operand(rax, 0), Immediate(0));
+ __ j(not_zero, &profiler_enabled);
+ {
+ // Call the api function directly.
+ __ Move(rax, function_address);
+ __ jmp(&end_profiler_check);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Third parameter is the address of the actual getter function.
+ __ Move(thunk_last_arg, function_address);
+ __ Move(rax, thunk_ref);
+ }
__ bind(&end_profiler_check);
// Call the api function!
@@ -3065,6 +3076,9 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ CompareRoot(map, RootIndex::kHeapNumberMap);
__ j(equal, &ok, Label::kNear);
+ __ CompareRoot(map, RootIndex::kBigIntMap);
+ __ j(equal, &ok, Label::kNear);
+
__ CompareRoot(return_value, RootIndex::kUndefinedValue);
__ j(equal, &ok, Label::kNear);
diff --git a/deps/v8/src/codegen/DEPS b/deps/v8/src/codegen/DEPS
new file mode 100644
index 0000000000..f3715e6ad0
--- /dev/null
+++ b/deps/v8/src/codegen/DEPS
@@ -0,0 +1,9 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+specific_include_rules = {
+ "external-reference.cc": [
+ "+src/regexp/regexp-macro-assembler-arch.h",
+ ],
+}
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 345e80a16e..feb2f62f78 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -1,9 +1,12 @@
-ahaas@chromium.org
+bbudge@chromium.org
bmeurer@chromium.org
clemensh@chromium.org
+gdeepti@chromium.org
+ishell@chromium.org
jarin@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
+leszeks@chromium.org
mslekova@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
@@ -11,3 +14,6 @@ neis@chromium.org
rmcilroy@chromium.org
sigurds@chromium.org
tebbi@chromium.org
+titzer@chromium.org
+
+# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index c8ef586fc1..7ca49a3f9f 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -2210,7 +2210,7 @@ void Assembler::stm(BlockAddrMode am, Register base, RegList src,
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-arm.h .
-void Assembler::stop(const char* msg, Condition cond, int32_t code) {
+void Assembler::stop(Condition cond, int32_t code) {
#ifndef __arm__
DCHECK_GE(code, kDefaultStopCode);
{
@@ -4827,12 +4827,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value) {
DCHECK(rmode != RelocInfo::CONST_POOL);
- // We can share CODE_TARGETs because we don't patch the code objects anymore,
- // and we make sure we emit only one reloc info for them (thus delta patching)
- // will apply the delta only once. At the moment, we do not dedup code targets
- // if they are wrapped in a heap object request (value == 0).
+ // We can share CODE_TARGETs and embedded objects, but we must make sure we
+ // only emit one reloc info for them (thus delta patching will apply the delta
+  // only once). At the moment, we do not deduplicate heap object requests, which
+ // are indicated by value == 0.
bool sharing_ok = RelocInfo::IsShareableRelocMode(rmode) ||
- (rmode == RelocInfo::CODE_TARGET && value != 0);
+ (rmode == RelocInfo::CODE_TARGET && value != 0) ||
+ (RelocInfo::IsEmbeddedObjectMode(rmode) && value != 0);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index 4db825fa97..f383632f73 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -625,8 +625,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
// Exception-generating instructions and debugging support
- void stop(const char* msg, Condition cond = al,
- int32_t code = kDefaultStopCode);
+ void stop(Condition cond = al, int32_t code = kDefaultStopCode);
void bkpt(uint32_t imm16); // v5 and above
void svc(uint32_t imm24, Condition cond = al);
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index bcda320f8b..ba334cd0b6 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -303,20 +303,24 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, mode);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 4);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
- mov(builtin_pointer,
- Operand(builtin_pointer, LSL, kSystemPointerSizeLog2 - kSmiTagSize));
- add(builtin_pointer, builtin_pointer,
+ mov(builtin_index,
+ Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiTagSize));
+ add(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
- ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
- Call(builtin_pointer);
+ ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
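
[Editor's note] LoadEntryFromBuiltinIndex exploits the Smi encoding: on this 32-bit target a Smi stores its integer shifted left by one, so untagging (shift right by kSmiTagSize) and scaling to pointer-sized table entries (shift left by kSystemPointerSizeLog2) fold into a single left shift of kSystemPointerSizeLog2 - kSmiTagSize. A quick arithmetic check in C++ (constants mirror the 32-bit configuration the STATIC_ASSERTs above require):

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;             // Smi = value << 1 on 32-bit targets
  const int kSystemPointerSizeLog2 = 2;  // 4-byte entry table slots

  for (int32_t builtin_index = 0; builtin_index < 1000; ++builtin_index) {
    int32_t smi = builtin_index << kSmiTagSize;  // tagged input register
    // Untag, then scale to a byte offset into the entry table...
    int32_t byte_offset = (smi >> kSmiTagSize) << kSystemPointerSizeLog2;
    // ...which equals the single folded shift the macro assembler emits.
    int32_t folded = smi << (kSystemPointerSizeLog2 - kSmiTagSize);
    assert(byte_offset == folded);
  }
  return 0;
}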
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -632,7 +636,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
add(scratch, object, Operand(offset - kHeapObjectTag));
tst(scratch, Operand(kPointerSize - 1));
b(eq, &ok);
- stop("Unaligned cell in write barrier");
+ stop();
bind(&ok);
}
@@ -1951,15 +1955,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
- const char* msg = GetAbortReason(reason);
#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
RecordComment(msg);
#endif
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
- stop(msg);
+ stop();
return;
}
@@ -2402,7 +2406,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort possibly
// re-entering here.
- stop("Unexpected alignment");
+ stop();
bind(&alignment_as_expected);
}
}
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 4f497dcea4..e4ce734f52 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -300,7 +300,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
bool check_constant_pool = true);
void Call(Label* target);
- void CallBuiltinPointer(Register builtin_pointer) override;
+ // Load the builtin given by the Smi in |builtin_index| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index 5680d8b054..baae106c1c 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -5,6 +5,9 @@
#ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
+#include <type_traits>
+
+#include "src/base/memory.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
@@ -22,8 +25,9 @@ void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references and immediate branches need extra work.
if (RelocInfo::IsInternalReference(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
- intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
- *p += delta; // Relocate entry.
+ intptr_t internal_ref = ReadUnalignedValue<intptr_t>(pc_);
+ internal_ref += delta; // Relocate entry.
+ WriteUnalignedValue<intptr_t>(pc_, internal_ref);
} else {
Instruction* instr = reinterpret_cast<Instruction*>(pc_);
if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
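
[Editor's note] Relocation of internal references now goes through ReadUnalignedValue/WriteUnalignedValue rather than dereferencing pc_ as an intptr_t*, since an embedded internal reference need not be naturally aligned. The standard portable way to express such an access in C++ is a memcpy pair, roughly as below (a generic sketch, not the contents of src/base/memory.h):

#include <cstdint>
#include <cstring>

// Read a T from a possibly unaligned address without invoking UB.
template <typename T>
T ReadUnalignedValue(const void* p) {
  T value;
  std::memcpy(&value, p, sizeof(T));
  return value;
}

// Write a T back to a possibly unaligned address.
template <typename T>
void WriteUnalignedValue(void* p, T value) {
  std::memcpy(p, &value, sizeof(T));
}

int main() {
  // A buffer whose interior offsets are not intptr_t-aligned.
  unsigned char code[sizeof(intptr_t) + 3] = {};
  void* pc = code + 3;
  WriteUnalignedValue<intptr_t>(pc, 0x1000);
  intptr_t ref = ReadUnalignedValue<intptr_t>(pc);
  ref += 0x40;  // "Relocate entry," as in RelocInfo::apply above.
  WriteUnalignedValue<intptr_t>(pc, ref);
  return ReadUnalignedValue<intptr_t>(pc) == 0x1040 ? 0 : 1;
}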
@@ -193,17 +197,16 @@ inline VRegister CPURegister::Q() const {
// Default initializer is for int types
template <typename T>
struct ImmediateInitializer {
- static const bool kIsIntType = true;
static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
static inline int64_t immediate_for(T t) {
STATIC_ASSERT(sizeof(T) <= 8);
+ STATIC_ASSERT(std::is_integral<T>::value || std::is_enum<T>::value);
return t;
}
};
template <>
struct ImmediateInitializer<Smi> {
- static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NONE; }
static inline int64_t immediate_for(Smi t) {
return static_cast<int64_t>(t.ptr());
@@ -212,7 +215,6 @@ struct ImmediateInitializer<Smi> {
template <>
struct ImmediateInitializer<ExternalReference> {
- static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(ExternalReference t) {
return RelocInfo::EXTERNAL_REFERENCE;
}
@@ -222,8 +224,9 @@ struct ImmediateInitializer<ExternalReference> {
};
template <typename T>
-Immediate::Immediate(Handle<T> value) {
- InitializeHandle(value);
+Immediate::Immediate(Handle<T> handle, RelocInfo::Mode mode)
+ : value_(static_cast<intptr_t>(handle.address())), rmode_(mode) {
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(mode));
}
template <typename T>
@@ -234,13 +237,9 @@ Immediate::Immediate(T t)
template <typename T>
Immediate::Immediate(T t, RelocInfo::Mode rmode)
: value_(ImmediateInitializer<T>::immediate_for(t)), rmode_(rmode) {
- STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
+ STATIC_ASSERT(std::is_integral<T>::value);
}
-// Operand.
-template <typename T>
-Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
-
template <typename T>
Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}
@@ -479,7 +478,7 @@ void Assembler::Unreachable() {
Address Assembler::target_pointer_address_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
- DCHECK(instr->IsLdrLiteralX());
+ DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}
@@ -494,6 +493,13 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
}
+Tagged_t Assembler::target_compressed_address_at(Address pc,
+ Address constant_pool) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ CHECK(instr->IsLdrLiteralW());
+ return Memory<Tagged_t>(target_pointer_address_at(pc));
+}
+
Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsLdrLiteralX()) {
@@ -502,14 +508,39 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
- return GetCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2);
+ return Handle<Code>::cast(
+ GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2));
}
}
-Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(Address pc) {
+AssemblerBase::EmbeddedObjectIndex
+Assembler::embedded_object_index_referenced_from(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
- CHECK(!instr->IsLdrLiteralX());
- return GetCompressedEmbeddedObject(ReadUnalignedValue<int32_t>(pc));
+ if (instr->IsLdrLiteralX()) {
+ STATIC_ASSERT(sizeof(EmbeddedObjectIndex) == sizeof(intptr_t));
+ return Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc));
+ } else {
+ DCHECK(instr->IsLdrLiteralW());
+ return Memory<uint32_t>(target_pointer_address_at(pc));
+ }
+}
+
+void Assembler::set_embedded_object_index_referenced_from(
+ Address pc, EmbeddedObjectIndex data) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ if (instr->IsLdrLiteralX()) {
+ Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc)) = data;
+ } else {
+ DCHECK(instr->IsLdrLiteralW());
+ DCHECK(is_uint32(data));
+ WriteUnalignedValue<uint32_t>(target_pointer_address_at(pc),
+ static_cast<uint32_t>(data));
+ }
+}
+
+Handle<HeapObject> Assembler::target_object_handle_at(Address pc) {
+ return GetEmbeddedObject(
+ Assembler::embedded_object_index_referenced_from(pc));
}
Address Assembler::runtime_entry_at(Address pc) {
@@ -557,7 +588,7 @@ void Assembler::deserialization_set_special_target_at(Address location,
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
- Memory<Address>(pc) = target;
+ WriteUnalignedValue<Address>(pc, target);
}
void Assembler::set_target_address_at(Address pc, Address constant_pool,
@@ -585,12 +616,21 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
}
+void Assembler::set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ CHECK(instr->IsLdrLiteralW());
+ Memory<Tagged_t>(target_pointer_address_at(pc)) = target;
+}
+
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
} else {
- DCHECK(reinterpret_cast<Instruction*>(pc_)->IsLdrLiteralX());
- return kSystemPointerSize;
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
+ return instr->IsLdrLiteralW() ? kTaggedSize : kSystemPointerSize;
}
}
@@ -629,19 +669,30 @@ Address RelocInfo::constant_pool_entry_address() {
}
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- return HeapObject::cast(
- Object(Assembler::target_address_at(pc_, constant_pool_)));
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(DecompressTaggedAny(
+ host_.address(),
+ Assembler::target_compressed_address_at(pc_, constant_pool_))));
+ } else {
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
+ }
}
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
- return target_object();
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(DecompressTaggedAny(
+ isolate,
+ Assembler::target_compressed_address_at(pc_, constant_pool_))));
+ } else {
+ return target_object();
+ }
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- if (IsFullEmbeddedObject(rmode_)) {
- return Handle<HeapObject>(reinterpret_cast<Address*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ if (IsEmbeddedObjectMode(rmode_)) {
+ return origin->target_object_handle_at(pc_);
} else {
DCHECK(IsCodeTarget(rmode_));
return origin->code_target_object_handle_at(pc_);
@@ -651,9 +702,15 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
- icache_flush_mode);
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(
+ pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode);
+ } else {
+ DCHECK(IsFullEmbeddedObject(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+ icache_flush_mode);
+ }
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
@@ -673,7 +730,7 @@ void RelocInfo::set_target_external_reference(
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
- return Memory<Address>(pc_);
+ return ReadUnalignedValue<Address>(pc_);
}
Address RelocInfo::target_internal_reference_address() {
@@ -701,11 +758,14 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
- DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
- Memory<Address>(pc_) = kNullAddress;
+ WriteUnalignedValue<Address>(pc_, kNullAddress);
+ } else if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(pc_, constant_pool_,
+ kNullAddress);
} else {
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
}
@@ -1025,9 +1085,7 @@ inline void Assembler::CheckBuffer() {
if (pc_offset() >= next_veneer_pool_check_) {
CheckVeneerPool(false, true);
}
- if (pc_offset() >= next_constant_pool_check_) {
- CheckConstPool(false, true);
- }
+ constpool_.MaybeCheck();
}
} // namespace internal
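The move from raw pointer dereferences to ReadUnalignedValue / WriteUnalignedValue matters because the patched address is not guaranteed to be aligned for an intptr_t access. A minimal sketch of what such helpers amount to, assuming the memcpy-based approach of src/base/memory.h (the names here are illustrative):

#include <cstdint>
#include <cstring>

template <typename T>
T ReadUnalignedSketch(uintptr_t address) {
  T value;
  std::memcpy(&value, reinterpret_cast<const void*>(address), sizeof(T));
  return value;
}

template <typename T>
void WriteUnalignedSketch(uintptr_t address, T value) {
  std::memcpy(reinterpret_cast<void*>(address), &value, sizeof(T));
}

// Relocating an internal reference, as in RelocInfo::apply above, then reads:
//   intptr_t ref = ReadUnalignedSketch<intptr_t>(pc);
//   WriteUnalignedSketch<intptr_t>(pc, ref + delta);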
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 1806f82b46..159e763ba2 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -34,6 +34,7 @@
#include "src/base/cpu.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
#include "src/codegen/string-constants.h"
#include "src/execution/frame-constants.h"
@@ -283,11 +284,6 @@ bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
return true;
}
-void Immediate::InitializeHandle(Handle<HeapObject> handle) {
- value_ = static_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
-}
-
bool Operand::NeedsRelocation(const Assembler* assembler) const {
RelocInfo::Mode rmode = immediate_.rmode();
@@ -298,167 +294,6 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
return !RelocInfo::IsNone(rmode);
}
-bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
- int offset) {
- auto existing = entry_map.find(data);
- if (existing == entry_map.end()) {
- entry_map[data] = static_cast<int>(entries_.size());
- entries_.push_back(std::make_pair(data, std::vector<int>(1, offset)));
- return true;
- }
- int index = existing->second;
- entries_[index].second.push_back(offset);
- return false;
-}
-
-// Constant Pool.
-bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
- DCHECK(mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL &&
- mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
- mode != RelocInfo::DEOPT_INLINING_ID &&
- mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
-
- bool write_reloc_info = true;
-
- uint64_t raw_data = static_cast<uint64_t>(data);
- int offset = assm_->pc_offset();
- if (IsEmpty()) {
- first_use_ = offset;
- }
-
- if (RelocInfo::IsShareableRelocMode(mode)) {
- write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
- } else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
- // A zero data value is a placeholder and must not be shared.
- write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
- } else {
- entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
- }
-
- if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
- // Request constant pool emission after the next instruction.
- assm_->SetNextConstPoolCheckIn(1);
- }
-
- return write_reloc_info;
-}
-
-int ConstPool::DistanceToFirstUse() {
- DCHECK_GE(first_use_, 0);
- return assm_->pc_offset() - first_use_;
-}
-
-int ConstPool::MaxPcOffset() {
- // There are no pending entries in the pool so we can never get out of
- // range.
- if (IsEmpty()) return kMaxInt;
-
- // Entries are not necessarily emitted in the order they are added so in the
- // worst case the first constant pool use will be accessing the last entry.
- return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
-}
-
-int ConstPool::WorstCaseSize() {
- if (IsEmpty()) return 0;
-
- // Max size prologue:
- // b over
- // ldr xzr, #pool_size
- // blr xzr
- // nop
- // All entries are 64-bit for now.
- return 4 * kInstrSize + EntryCount() * kSystemPointerSize;
-}
-
-int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
- if (IsEmpty()) return 0;
-
- // Prologue is:
- // b over ;; if require_jump
- // ldr xzr, #pool_size
- // blr xzr
- // nop ;; if not 64-bit aligned
- int prologue_size = require_jump ? kInstrSize : 0;
- prologue_size += 2 * kInstrSize;
- prologue_size +=
- IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;
-
- // All entries are 64-bit for now.
- return prologue_size + EntryCount() * kSystemPointerSize;
-}
-
-void ConstPool::Emit(bool require_jump) {
- DCHECK(!assm_->is_const_pool_blocked());
- // Prevent recursive pool emission and protect from veneer pools.
- Assembler::BlockPoolsScope block_pools(assm_);
-
- int size = SizeIfEmittedAtCurrentPc(require_jump);
- Label size_check;
- assm_->bind(&size_check);
-
- assm_->RecordConstPool(size);
- // Emit the constant pool. It is preceded by an optional branch if
- // require_jump and a header which will:
- // 1) Encode the size of the constant pool, for use by the disassembler.
- // 2) Terminate the program, to try to prevent execution from accidentally
- // flowing into the constant pool.
- // 3) align the pool entries to 64-bit.
- // The header is therefore made of up to three arm64 instructions:
- // ldr xzr, #<size of the constant pool in 32-bit words>
- // blr xzr
- // nop
- //
- // If executed, the header will likely segfault and lr will point to the
- // instruction following the offending blr.
- // TODO(all): Make the alignment part less fragile. Currently code is
- // allocated as a byte array so there are no guarantees the alignment will
- // be preserved on compaction. Currently it works as allocation seems to be
- // 64-bit aligned.
-
- // Emit branch if required
- Label after_pool;
- if (require_jump) {
- assm_->b(&after_pool);
- }
-
- // Emit the header.
- assm_->RecordComment("[ Constant Pool");
- EmitMarker();
- EmitGuard();
- assm_->Align(8);
-
- // Emit constant pool entries.
- // TODO(all): currently each relocated constant is 64 bits, consider adding
- // support for 32-bit entries.
- EmitEntries();
- assm_->RecordComment("]");
-
- if (after_pool.is_linked()) {
- assm_->bind(&after_pool);
- }
-
- DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
- static_cast<unsigned>(size));
-}
-
-void ConstPool::Clear() {
- shared_entries_.clear();
- handle_to_index_map_.clear();
- entries_.clear();
- first_use_ = -1;
-}
-
-void ConstPool::EmitMarker() {
- // A constant pool size is expressed in number of 32-bits words.
- // Currently all entries are 64-bit.
- // + 1 is for the crash guard.
- // + 0/1 for alignment.
- int word_count =
- EntryCount() * 2 + 1 + (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
- assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
- Assembler::Rt(xzr));
-}
-
MemOperand::PairResult MemOperand::AreConsistentForPair(
const MemOperand& operandA, const MemOperand& operandB,
int access_size_log2) {
@@ -484,47 +319,18 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
return kNotPair;
}
-void ConstPool::EmitGuard() {
-#ifdef DEBUG
- Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
- DCHECK(instr->preceding()->IsLdrLiteralX() &&
- instr->preceding()->Rt() == xzr.code());
-#endif
- assm_->EmitPoolGuard();
-}
-
-void ConstPool::EmitEntries() {
- DCHECK(IsAligned(assm_->pc_offset(), 8));
-
- // Emit entries.
- for (const auto& entry : entries_) {
- for (const auto& pc : entry.second) {
- Instruction* instr = assm_->InstructionAt(pc);
-
- // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
- DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc());
- }
-
- assm_->dc64(entry.first);
- }
- Clear();
-}
-
// Assembler
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- constpool_(this),
- unresolved_branches_() {
- const_pool_blocked_nesting_ = 0;
+ unresolved_branches_(),
+ constpool_(this) {
veneer_pool_blocked_nesting_ = 0;
Reset();
}
Assembler::~Assembler() {
DCHECK(constpool_.IsEmpty());
- DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
}
@@ -533,7 +339,6 @@ void Assembler::AbortedCodeGeneration() { constpool_.Clear(); }
void Assembler::Reset() {
#ifdef DEBUG
DCHECK((pc_ >= buffer_start_) && (pc_ < buffer_start_ + buffer_->size()));
- DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
DCHECK(unresolved_branches_.empty());
memset(buffer_start_, 0, pc_ - buffer_start_);
@@ -541,9 +346,7 @@ void Assembler::Reset() {
pc_ = buffer_start_;
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
constpool_.Clear();
- next_constant_pool_check_ = 0;
next_veneer_pool_check_ = kMaxInt;
- no_const_pool_before_ = 0;
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
@@ -554,14 +357,16 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
case HeapObjectRequest::kHeapNumber: {
Handle<HeapObject> object = isolate->factory()->NewHeapNumber(
request.heap_number(), AllocationType::kOld);
- set_target_address_at(pc, 0 /* unused */, object.address());
+ EmbeddedObjectIndex index = AddEmbeddedObject(object);
+ set_embedded_object_index_referenced_from(pc, index);
break;
}
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
- set_target_address_at(pc, 0 /* unused */,
- str->AllocateStringConstant(isolate).address());
+ EmbeddedObjectIndex index =
+ AddEmbeddedObject(str->AllocateStringConstant(isolate));
+ set_embedded_object_index_referenced_from(pc, index);
break;
}
}
@@ -572,7 +377,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// Emit constant pool if necessary.
- CheckConstPool(true, false);
+ ForceConstantPoolEmissionWithoutJump();
DCHECK(constpool_.IsEmpty());
int code_comments_size = WriteCodeComments();
@@ -870,32 +675,6 @@ void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
}
}
-void Assembler::StartBlockConstPool() {
- if (const_pool_blocked_nesting_++ == 0) {
- // Prevent constant pool checks happening by setting the next check to
- // the biggest possible offset.
- next_constant_pool_check_ = kMaxInt;
- }
-}
-
-void Assembler::EndBlockConstPool() {
- if (--const_pool_blocked_nesting_ == 0) {
- // Check the constant pool hasn't been blocked for too long.
- DCHECK(pc_offset() < constpool_.MaxPcOffset());
- // Two cases:
- // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
- // still blocked
- // * no_const_pool_before_ < next_constant_pool_check_ and the next emit
- // will trigger a check.
- next_constant_pool_check_ = no_const_pool_before_;
- }
-}
-
-bool Assembler::is_const_pool_blocked() const {
- return (const_pool_blocked_nesting_ > 0) ||
- (pc_offset() < no_const_pool_before_);
-}
-
bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
@@ -1497,6 +1276,7 @@ Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
if (operand.IsHeapObjectRequest()) {
+ BlockPoolsScope no_pool_before_ldr_of_heap_object_request(this);
RequestHeapObject(operand.heap_object_request());
ldr(rt, operand.immediate_for_heap_object_request());
} else {
@@ -1505,11 +1285,8 @@ void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
}
void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
- // Currently we only support 64-bit literals.
- DCHECK(rt.Is64Bits());
-
+ BlockPoolsScope no_pool_before_ldr_pcrel_instr(this);
RecordRelocInfo(imm.rmode(), imm.value());
- BlockConstPoolFor(1);
// The load will be patched when the constpool is emitted, patching code
// expect a load literal with offset 0.
ldr_pcrel(rt, 0);
@@ -3679,6 +3456,7 @@ void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
}
void Assembler::dcptr(Label* label) {
+ BlockPoolsScope no_pool_inbetween(this);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
if (label->is_bound()) {
// The label is bound, so it does not need to be updated and the internal
@@ -4471,8 +4249,10 @@ void Assembler::GrowBuffer() {
// Relocate internal references.
for (auto pos : internal_reference_positions_) {
- intptr_t* p = reinterpret_cast<intptr_t*>(buffer_start_ + pos);
- *p += pc_delta;
+ Address address = reinterpret_cast<intptr_t>(buffer_start_) + pos;
+ intptr_t internal_ref = ReadUnalignedValue<intptr_t>(address);
+ internal_ref += pc_delta;
+ WriteUnalignedValue<intptr_t>(address, internal_ref);
}
// Pending relocation entries are also relative, no need to relocate.
@@ -4492,17 +4272,31 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else if (constant_pool_mode == NEEDS_POOL_ENTRY) {
- bool new_constpool_entry = constpool_.RecordEntry(data, rmode);
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
- if (!new_constpool_entry) return;
+ if (RelocInfo::IsEmbeddedObjectMode(rmode)) {
+ Handle<HeapObject> handle(reinterpret_cast<Address*>(data));
+ data = AddEmbeddedObject(handle);
+ }
+ if (rmode == RelocInfo::COMPRESSED_EMBEDDED_OBJECT) {
+ if (constpool_.RecordEntry(static_cast<uint32_t>(data), rmode) ==
+ RelocInfoStatus::kMustOmitForDuplicate) {
+ return;
+ }
+ } else {
+ if (constpool_.RecordEntry(static_cast<uint64_t>(data), rmode) ==
+ RelocInfoStatus::kMustOmitForDuplicate) {
+ return;
+ }
+ }
}
// For modes that cannot use the constant pool, a different sequence of
// instructions will be emitted by this function's caller.
if (!ShouldRecordRelocInfo(rmode)) return;
+ // Callers should ensure that constant pool emission is blocked until the
+ // instruction the reloc info is associated with has been emitted.
+ DCHECK(constpool_.IsBlocked());
+
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
@@ -4511,103 +4305,127 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
void Assembler::near_jump(int offset, RelocInfo::Mode rmode) {
+ BlockPoolsScope no_pool_before_b_instr(this);
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
b(offset);
}
void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
+ BlockPoolsScope no_pool_before_bl_instr(this);
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
bl(offset);
}
void Assembler::near_call(HeapObjectRequest request) {
+ BlockPoolsScope no_pool_before_bl_instr(this);
RequestHeapObject(request);
- int index = AddCodeTarget(Handle<Code>());
+ EmbeddedObjectIndex index = AddEmbeddedObject(Handle<Code>());
RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
- bl(index);
+ DCHECK(is_int32(index));
+ bl(static_cast<int>(index));
}
-void Assembler::BlockConstPoolFor(int instructions) {
- int pc_limit = pc_offset() + instructions * kInstrSize;
- if (no_const_pool_before_ < pc_limit) {
- no_const_pool_before_ = pc_limit;
- // Make sure the pool won't be blocked for too long.
- DCHECK(pc_limit < constpool_.MaxPcOffset());
- }
+// Constant Pool
- if (next_constant_pool_check_ < no_const_pool_before_) {
- next_constant_pool_check_ = no_const_pool_before_;
- }
+void ConstantPool::EmitPrologue(Alignment require_alignment) {
+ // Recorded constant pool size is expressed in number of 32-bits words,
+ // and includes prologue and alignment, but not the jump around the pool
+ // and the size of the marker itself.
+ const int marker_size = 1;
+ int word_count =
+ ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size;
+ assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
+ Assembler::Rt(xzr));
+ assm_->EmitPoolGuard();
}
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Some short sequence of instruction mustn't be broken up by constant pool
- // emission, such sequences are protected by calls to BlockConstPoolFor and
- // BlockConstPoolScope.
- if (is_const_pool_blocked()) {
- // Something is wrong if emission is forced and blocked at the same time.
- DCHECK(!force_emit);
- return;
- }
+int ConstantPool::PrologueSize(Jump require_jump) const {
+ // Prologue is:
+ // b over ;; if require_jump
+ // ldr xzr, #pool_size
+ // blr xzr
+ int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;
+ prologue_size += 2 * kInstrSize;
+ return prologue_size;
+}
- // There is nothing to do if there are no pending constant pool entries.
- if (constpool_.IsEmpty()) {
- // Calculate the offset of the next check.
- SetNextConstPoolCheckIn(kCheckConstPoolInterval);
- return;
- }
+void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
+ Instruction* entry_offset,
+ const ConstantPoolKey& key) {
+ Instruction* instr = assm_->InstructionAt(load_offset);
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
+ instr->SetImmPCOffsetTarget(assm_->options(), entry_offset);
+}
- // We emit a constant pool when:
- // * requested to do so by parameter force_emit (e.g. after each function).
- // * the distance to the first instruction accessing the constant pool is
- // kApproxMaxDistToConstPool or more.
- // * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
- int dist = constpool_.DistanceToFirstUse();
- int count = constpool_.EntryCount();
- if (!force_emit && (dist < kApproxMaxDistToConstPool) &&
- (count < kApproxMaxPoolEntryCount)) {
+void ConstantPool::Check(Emission force_emit, Jump require_jump,
+ size_t margin) {
+  // Some short sequences of instructions must not be broken up by constant
+  // pool emission; such sequences are protected by a ConstPool::BlockScope.
+ if (IsBlocked()) {
+ // Something is wrong if emission is forced and blocked at the same time.
+ DCHECK_EQ(force_emit, Emission::kIfNeeded);
return;
}
- // Emit veneers for branches that would go out of range during emission of the
- // constant pool.
- int worst_case_size = constpool_.WorstCaseSize();
- CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + worst_case_size);
+  // We emit a constant pool only if:
+  //  * it is not empty, and
+  //  * emission is either forced by parameter force_emit (e.g. at function
+  //    end) or mandatory/opportune according to {ShouldEmitNow}.
+ if (!IsEmpty() && (force_emit == Emission::kForced ||
+ ShouldEmitNow(require_jump, margin))) {
+ // Emit veneers for branches that would go out of range during emission of
+ // the constant pool.
+ int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
+ assm_->CheckVeneerPool(false, require_jump == Jump::kRequired,
+ assm_->kVeneerDistanceMargin + worst_case_size +
+ static_cast<int>(margin));
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (this includes the gap to the relocation information).
+ int needed_space = worst_case_size + assm_->kGap;
+ while (assm_->buffer_space() <= needed_space) {
+ assm_->GrowBuffer();
+ }
- // Check that the code buffer is large enough before emitting the constant
- // pool (this includes the gap to the relocation information).
- int needed_space = worst_case_size + kGap + 1 * kInstrSize;
- while (buffer_space() <= needed_space) {
- GrowBuffer();
+ EmitAndClear(require_jump);
}
-
- Label size_check;
- bind(&size_check);
- constpool_.Emit(require_jump);
- DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
- static_cast<unsigned>(worst_case_size));
-
- // Since a constant pool was just emitted, move the check offset forward by
+ // Since a constant pool is (now) empty, move the check offset forward by
// the standard interval.
- SetNextConstPoolCheckIn(kCheckConstPoolInterval);
+ SetNextCheckIn(ConstantPool::kCheckInterval);
}
-bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
+// Pool entries are accessed with a PC-relative load, therefore this cannot be
+// more than 1 * MB. Since constant pool emission checks are interval based,
+// and we want to keep entries close to the code, we try to emit every 64KB.
+const size_t ConstantPool::kMaxDistToPool32 = 1 * MB;
+const size_t ConstantPool::kMaxDistToPool64 = 1 * MB;
+const size_t ConstantPool::kCheckInterval = 128 * kInstrSize;
+const size_t ConstantPool::kApproxDistToPool32 = 64 * KB;
+const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32;
+
+const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB;
+const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB;
+const size_t ConstantPool::kApproxMaxEntryCount = 512;
+
+bool Assembler::ShouldEmitVeneer(int max_reachable_pc, size_t margin) {
// Account for the branch around the veneers and the guard.
int protection_offset = 2 * kInstrSize;
- return pc_offset() >
- max_reachable_pc - margin - protection_offset -
- static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+ return static_cast<intptr_t>(pc_offset() + margin + protection_offset +
+ unresolved_branches_.size() *
+ kMaxVeneerCodeSize) >= max_reachable_pc;
}
void Assembler::RecordVeneerPool(int location_offset, int size) {
+ Assembler::BlockPoolsScope block_pools(this, PoolEmissionCheck::kSkip);
RelocInfo rinfo(reinterpret_cast<Address>(buffer_start_) + location_offset,
RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code());
reloc_info_writer.Write(&rinfo);
}
-void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
- BlockPoolsScope scope(this);
+void Assembler::EmitVeneers(bool force_emit, bool need_protection,
+ size_t margin) {
+ BlockPoolsScope scope(this, PoolEmissionCheck::kSkip);
RecordComment("[ Veneers");
// The exact size of the veneer pool must be recorded (see the comment at the
@@ -4677,7 +4495,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
}
void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
- int margin) {
+ size_t margin) {
// There is nothing to do if there are no pending veneer pool entries.
if (unresolved_branches_.empty()) {
DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
@@ -4713,6 +4531,7 @@ int Assembler::buffer_space() const {
void Assembler::RecordConstPool(int size) {
// We only need this for debugger support, to correctly compute offsets in the
// code.
+ Assembler::BlockPoolsScope block_pools(this);
RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
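The size_t-based ShouldEmitVeneer above is a plain bounds check: veneers are emitted once the current position, plus the margin the caller wants to reserve, the protective branch and guard, and the worst-case size of all pending veneers could reach past the branch's maximum reachable pc. A hedged sketch of that condition (kMaxVeneerCodeSize is passed in as an assumed parameter):

#include <cstddef>
#include <cstdint>

// Mirrors the condition in Assembler::ShouldEmitVeneer; illustrative only.
bool ShouldEmitVeneerSketch(int pc_offset, int max_reachable_pc, size_t margin,
                            size_t unresolved_branch_count,
                            size_t max_veneer_code_size /* assumed */) {
  const int kInstrSize = 4;
  const int protection_offset = 2 * kInstrSize;  // branch over veneers + guard
  return static_cast<intptr_t>(pc_offset + margin + protection_offset +
                               unresolved_branch_count *
                                   max_veneer_code_size) >= max_reachable_pc;
}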
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 04cd422241..6a6bf633c1 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -35,7 +35,8 @@ class SafepointTableBuilder;
class Immediate {
public:
template <typename T>
- inline explicit Immediate(Handle<T> handle);
+ inline explicit Immediate(
+ Handle<T> handle, RelocInfo::Mode mode = RelocInfo::FULL_EMBEDDED_OBJECT);
// This is allowed to be an implicit constructor because Immediate is
// a wrapper class that doesn't normally perform any type conversion.
@@ -49,8 +50,6 @@ class Immediate {
RelocInfo::Mode rmode() const { return rmode_; }
private:
- V8_EXPORT_PRIVATE void InitializeHandle(Handle<HeapObject> value);
-
int64_t value_;
RelocInfo::Mode rmode_;
};
@@ -85,9 +84,6 @@ class Operand {
inline HeapObjectRequest heap_object_request() const;
inline Immediate immediate_for_heap_object_request() const;
- template <typename T>
- inline explicit Operand(Handle<T> handle);
-
// Implicit constructor for all int types, ExternalReference, and Smi.
template <typename T>
inline Operand(T t); // NOLINT(runtime/explicit)
@@ -174,60 +170,6 @@ class MemOperand {
unsigned shift_amount_;
};
-class ConstPool {
- public:
- explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {}
- // Returns true when we need to write RelocInfo and false when we do not.
- bool RecordEntry(intptr_t data, RelocInfo::Mode mode);
- int EntryCount() const { return static_cast<int>(entries_.size()); }
- bool IsEmpty() const { return entries_.empty(); }
- // Distance in bytes between the current pc and the first instruction
- // using the pool. If there are no pending entries return kMaxInt.
- int DistanceToFirstUse();
- // Offset after which instructions using the pool will be out of range.
- int MaxPcOffset();
- // Maximum size the constant pool can be with current entries. It always
- // includes alignment padding and branch over.
- int WorstCaseSize();
- // Size in bytes of the literal pool *if* it is emitted at the current
- // pc. The size will include the branch over the pool if it was requested.
- int SizeIfEmittedAtCurrentPc(bool require_jump);
- // Emit the literal pool at the current pc with a branch over the pool if
- // requested.
- void Emit(bool require_jump);
- // Discard any pending pool entries.
- void Clear();
-
- private:
- void EmitMarker();
- void EmitGuard();
- void EmitEntries();
-
- using SharedEntryMap = std::map<uint64_t, int>;
- // Adds a shared entry to entries_, using 'entry_map' to determine whether we
- // already track this entry. Returns true if this is the first time we add
- // this entry, false otherwise.
- bool AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, int offset);
-
- Assembler* assm_;
- // Keep track of the first instruction requiring a constant pool entry
- // since the previous constant pool was emitted.
- int first_use_;
-
- // Map of data to index in entries_ for shared entries.
- SharedEntryMap shared_entries_;
-
- // Map of address of handle to index in entries_. We need to keep track of
- // code targets separately from other shared entries, as they can be
- // relocated.
- SharedEntryMap handle_to_index_map_;
-
- // Values, pc offset(s) of entries. Use a vector to preserve the order of
- // insertion, as the serializer expects code target RelocInfo to point to
- // constant pool addresses in an ascending order.
- std::vector<std::pair<uint64_t, std::vector<int> > > entries_;
-};
-
// -----------------------------------------------------------------------------
// Assembler.
@@ -312,15 +254,26 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ inline static Tagged_t target_compressed_address_at(Address pc,
+ Address constant_pool);
inline static void set_target_address_at(
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ inline static void set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
// Returns the handle for the code object called at 'pc'.
// This might need to be temporarily encoded as an offset into code_targets_.
inline Handle<Code> code_target_object_handle_at(Address pc);
-
- inline Handle<HeapObject> compressed_embedded_object_handle_at(Address pc);
+ inline EmbeddedObjectIndex embedded_object_index_referenced_from(Address pc);
+ inline void set_embedded_object_index_referenced_from(
+ Address p, EmbeddedObjectIndex index);
+ // Returns the handle for the heap object referenced at 'pc'.
+ inline Handle<HeapObject> target_object_handle_at(Address pc);
// Returns the target address for a runtime function for the call encoded
// at 'pc'.
@@ -371,16 +324,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
- // Prevent contant pool emission until EndBlockConstPool is called.
- // Call to this function can be nested but must be followed by an equal
- // number of calls to EndBlockConstpool.
- void StartBlockConstPool();
-
- // Resume constant pool emission. Need to be called as many time as
- // StartBlockConstPool to have an effect.
- void EndBlockConstPool();
-
- bool is_const_pool_blocked() const;
static bool IsConstantPoolAt(Instruction* instr);
static int ConstantPoolSizeAt(Instruction* instr);
// See Assembler::CheckConstPool for more info.
@@ -399,16 +342,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return veneer_pool_blocked_nesting_ > 0;
}
- // Block/resume emission of constant pools and veneer pools.
- void StartBlockPools() {
- StartBlockConstPool();
- StartBlockVeneerPool();
- }
- void EndBlockPools() {
- EndBlockConstPool();
- EndBlockVeneerPool();
- }
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -2120,8 +2053,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Code generation helpers --------------------------------------------------
- bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
-
Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(ptrdiff_t offset) const {
@@ -2405,31 +2336,26 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// FP register type.
inline static Instr FPType(VRegister fd);
- // Class for scoping postponing the constant pool generation.
- class BlockConstPoolScope {
- public:
- explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockConstPool();
- }
- ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
-
- private:
- Assembler* assem_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
- };
-
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
- // Check if is time to emit a constant pool.
- void CheckConstPool(bool force_emit, bool require_jump);
+ void ForceConstantPoolEmissionWithoutJump() {
+ constpool_.Check(Emission::kForced, Jump::kOmitted);
+ }
+ void ForceConstantPoolEmissionWithJump() {
+ constpool_.Check(Emission::kForced, Jump::kRequired);
+ }
+ // Check if the const pool needs to be emitted while pretending that {margin}
+ // more bytes of instructions have already been emitted.
+ void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
+ constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
+ }
// Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach to specified pc.
bool ShouldEmitVeneer(int max_reachable_pc,
- int margin = kVeneerDistanceMargin);
- bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
+ size_t margin = kVeneerDistanceMargin);
+ bool ShouldEmitVeneers(size_t margin = kVeneerDistanceMargin) {
return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
}
@@ -2443,23 +2369,34 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// If need_protection is true, the veneers are protected by a branch jumping
// over the code.
void EmitVeneers(bool force_emit, bool need_protection,
- int margin = kVeneerDistanceMargin);
+ size_t margin = kVeneerDistanceMargin);
void EmitVeneersGuard() { EmitPoolGuard(); }
// Checks whether veneers need to be emitted at this point.
// If force_emit is set, a veneer is generated for *all* unresolved branches.
void CheckVeneerPool(bool force_emit, bool require_jump,
- int margin = kVeneerDistanceMargin);
+ size_t margin = kVeneerDistanceMargin);
+
+ using BlockConstPoolScope = ConstantPool::BlockScope;
class BlockPoolsScope {
public:
- explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockPools();
+ // Block veneer and constant pool. Emits pools if necessary to ensure that
+ // {margin} more bytes can be emitted without triggering pool emission.
+ explicit BlockPoolsScope(Assembler* assem, size_t margin = 0)
+ : assem_(assem), block_const_pool_(assem, margin) {
+ assem_->CheckVeneerPool(false, true, margin);
+ assem_->StartBlockVeneerPool();
+ }
+
+ BlockPoolsScope(Assembler* assem, PoolEmissionCheck check)
+ : assem_(assem), block_const_pool_(assem, check) {
+ assem_->StartBlockVeneerPool();
}
- ~BlockPoolsScope() { assem_->EndBlockPools(); }
+ ~BlockPoolsScope() { assem_->EndBlockVeneerPool(); }
private:
Assembler* assem_;
-
+ BlockConstPoolScope block_const_pool_;
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
};
@@ -2622,15 +2559,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Verify that a label's link chain is intact.
void CheckLabelLinkChain(Label const* label);
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
- // Set how far from current pc the next constant pool check will be.
- void SetNextConstPoolCheckIn(int instructions) {
- next_constant_pool_check_ = pc_offset() + instructions * kInstrSize;
- }
-
// Emit the instruction at pc_.
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
@@ -2658,40 +2586,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void CheckBufferSpace();
void CheckBuffer();
- // Pc offset of the next constant pool check.
- int next_constant_pool_check_;
-
- // Constant pool generation
- // Pools are emitted in the instruction stream. They are emitted when:
- // * the distance to the first use is above a pre-defined distance or
- // * the numbers of entries in the pool is above a pre-defined size or
- // * code generation is finished
- // If a pool needs to be emitted before code generation is finished a branch
- // over the emitted pool will be inserted.
-
- // Constants in the pool may be addresses of functions that gets relocated;
- // if so, a relocation info entry is associated to the constant pool entry.
-
- // Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static constexpr int kCheckConstPoolInterval = 128;
-
- // Distance to first use after a which a pool will be emitted. Pool entries
- // are accessed with pc relative load therefore this cannot be more than
- // 1 * MB. Since constant pool emission checks are interval based this value
- // is an approximation.
- static constexpr int kApproxMaxDistToConstPool = 64 * KB;
-
- // Number of pool entries after which a pool will be emitted. Since constant
- // pool emission checks are interval based this value is an approximation.
- static constexpr int kApproxMaxPoolEntryCount = 512;
-
- // Emission of the constant pool may be blocked in some code sequences.
- int const_pool_blocked_nesting_; // Block emission if this is not zero.
- int no_const_pool_before_; // Block emission before this pc offset.
-
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -2705,16 +2599,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// are already bound.
std::deque<int> internal_reference_positions_;
- // Relocation info records are also used during code generation as temporary
- // containers for constants and code target addresses until they are emitted
- // to the constant pool. These pending relocation info records are temporarily
- // stored in a separate buffer until a constant pool is emitted.
- // If every instruction in a long sequence is accessing the pool, we need one
- // pending relocation entry per instruction.
-
- // The pending constant pool.
- ConstPool constpool_;
-
protected:
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
@@ -2727,17 +2611,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
public:
#ifdef DEBUG
// Functions used for testing.
- int GetConstantPoolEntriesSizeForTesting() const {
+ size_t GetConstantPoolEntriesSizeForTesting() const {
// Do not include branch over the pool.
- return constpool_.EntryCount() * kSystemPointerSize;
+ return constpool_.Entry32Count() * kInt32Size +
+ constpool_.Entry64Count() * kInt64Size;
}
- static constexpr int GetCheckConstPoolIntervalForTesting() {
- return kCheckConstPoolInterval;
+ static size_t GetCheckConstPoolIntervalForTesting() {
+ return ConstantPool::kCheckInterval;
}
- static constexpr int GetApproxMaxDistToConstPoolForTesting() {
- return kApproxMaxDistToConstPool;
+ static size_t GetApproxMaxDistToConstPoolForTesting() {
+ return ConstantPool::kApproxDistToPool64;
}
#endif
@@ -2779,7 +2664,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DCHECK(!unresolved_branches_.empty());
return unresolved_branches_.begin()->first;
}
- // This is similar to next_constant_pool_check_ and helps reduce the overhead
+ // This PC-offset of the next veneer pool check helps reduce the overhead
// of checking for veneer pools.
// It is maintained to the closest unresolved branch limit minus the maximum
// veneer margin (or kMaxInt if there are no unresolved branches).
@@ -2804,8 +2689,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
int WriteCodeComments();
+ // The pending constant pool.
+ ConstantPool constpool_;
+
friend class EnsureSpace;
- friend class ConstPool;
+ friend class ConstantPool;
};
class PatchingAssembler : public Assembler {
@@ -2822,19 +2710,12 @@ class PatchingAssembler : public Assembler {
PatchingAssembler(const AssemblerOptions& options, byte* start,
unsigned count)
: Assembler(options,
- ExternalAssemblerBuffer(start, count * kInstrSize + kGap)) {
- // Block constant pool emission.
- StartBlockPools();
- }
+ ExternalAssemblerBuffer(start, count * kInstrSize + kGap)),
+ block_constant_pool_emission_scope(this) {}
~PatchingAssembler() {
- // Const pool should still be blocked.
- DCHECK(is_const_pool_blocked());
- EndBlockPools();
// Verify we have generated the number of instruction we expected.
DCHECK_EQ(pc_offset() + kGap, buffer_->size());
- // Verify no relocation information has been emitted.
- DCHECK(IsConstPoolEmpty());
}
// See definition of PatchAdrFar() for details.
@@ -2842,11 +2723,19 @@ class PatchingAssembler : public Assembler {
static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(int64_t target_offset);
void PatchSubSp(uint32_t immediate);
+
+ private:
+ BlockPoolsScope block_constant_pool_emission_scope;
};
class EnsureSpace {
public:
- explicit EnsureSpace(Assembler* assembler) { assembler->CheckBufferSpace(); }
+ explicit EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) {
+ assembler->CheckBufferSpace();
+ }
+
+ private:
+ Assembler::BlockPoolsScope block_pools_scope_;
};
} // namespace internal
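The constant pool marker emitted by ConstantPool::EmitPrologue records the pool size in 32-bit words, excluding the optional branch around the pool and the marker instruction itself. A small worked sketch of that arithmetic, assuming ComputeSize(Jump::kOmitted, ...) is the two-instruction prologue plus an optional alignment pad plus the entries (which matches the removed EmitMarker computation):

// Sketch of the marker word count, assuming only 64-bit entries.
int MarkerWordCountSketch(int entry64_count, bool needs_alignment_pad) {
  const int kInstrSize = 4;
  const int kInt32Size = 4;
  const int marker_size = 1;  // the "ldr xzr, #size" marker itself
  int size_in_bytes = 2 * kInstrSize                            // marker + guard
                      + (needs_alignment_pad ? kInstrSize : 0)  // pad to 8 bytes
                      + entry64_count * 8;                      // pool entries
  return size_in_bytes / kInt32Size - marker_size;
}

For example, two 64-bit entries with a pad give (8 + 4 + 16) / 4 - 1 = 6 words, the same value the old EmitMarker arithmetic (2 * 2 + 1 + 1) would have produced.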
diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index eb3fb3a6be..a1e962452b 100644
--- a/deps/v8/src/codegen/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -32,8 +32,8 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
-constexpr size_t kLoadLiteralScaleLog2 = 2;
-constexpr size_t kMaxLoadLiteralRange = 1 * MB;
+constexpr uint8_t kLoadLiteralScaleLog2 = 2;
+constexpr int kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
const int kNumberOfVRegisters = 32;
diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc
index e0ab589914..32bcc6f268 100644
--- a/deps/v8/src/codegen/arm64/cpu-arm64.cc
+++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc
@@ -15,7 +15,7 @@ namespace internal {
class CacheLineSizes {
public:
CacheLineSizes() {
-#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN)
+#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN) || defined(__APPLE__)
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
diff --git a/deps/v8/src/codegen/arm64/decoder-arm64.h b/deps/v8/src/codegen/arm64/decoder-arm64.h
index 3d113eb836..7621c516ce 100644
--- a/deps/v8/src/codegen/arm64/decoder-arm64.h
+++ b/deps/v8/src/codegen/arm64/decoder-arm64.h
@@ -95,7 +95,7 @@ class V8_EXPORT_PRIVATE DecoderVisitor {
};
// A visitor that dispatches to a list of visitors.
-class DispatchingDecoderVisitor : public DecoderVisitor {
+class V8_EXPORT_PRIVATE DispatchingDecoderVisitor : public DecoderVisitor {
public:
DispatchingDecoderVisitor() {}
virtual ~DispatchingDecoderVisitor() {}
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h
index 5c3cf687e7..a73c3feed7 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.h
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.h
@@ -203,6 +203,7 @@ class Instruction {
}
bool IsLdrLiteralX() const { return Mask(LoadLiteralMask) == LDR_x_lit; }
+ bool IsLdrLiteralW() const { return Mask(LoadLiteralMask) == LDR_w_lit; }
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index aab9fc79a2..792a8637f6 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -291,8 +291,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
ExternalReference reference = bit_cast<ExternalReference>(addr);
IndirectLoadExternalReference(rd, reference);
return;
- } else if (operand.ImmediateRMode() ==
- RelocInfo::FULL_EMBEDDED_OBJECT) {
+ } else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) {
Handle<HeapObject> x(
reinterpret_cast<Address*>(operand.ImmediateValue()));
IndirectLoadConstant(rd, x);
@@ -1866,7 +1865,9 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
}
if (CanUseNearCallOrJump(rmode)) {
- JumpHelper(static_cast<int64_t>(AddCodeTarget(code)), rmode, cond);
+ EmbeddedObjectIndex index = AddEmbeddedObject(code);
+ DCHECK(is_int32(index));
+ JumpHelper(static_cast<int64_t>(index), rmode, cond);
} else {
Jump(code.address(), rmode, cond);
}
@@ -1912,7 +1913,9 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
}
if (CanUseNearCallOrJump(rmode)) {
- near_call(AddCodeTarget(code), rmode);
+ EmbeddedObjectIndex index = AddEmbeddedObject(code);
+ DCHECK(is_int32(index));
+ near_call(static_cast<int32_t>(index), rmode);
} else {
IndirectCall(code.address(), rmode);
}
@@ -1925,24 +1928,27 @@ void TurboAssembler::Call(ExternalReference target) {
Call(temp);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiShiftSize == 0);
- Lsl(builtin_pointer, builtin_pointer, kSystemPointerSizeLog2 - kSmiShift);
+ Lsl(builtin_index, builtin_index, kSystemPointerSizeLog2 - kSmiShift);
#else
STATIC_ASSERT(kSmiShiftSize == 31);
- Asr(builtin_pointer, builtin_pointer, kSmiShift - kSystemPointerSizeLog2);
+ Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2);
#endif
- Add(builtin_pointer, builtin_pointer,
- IsolateData::builtin_entry_table_offset());
- Ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
- Call(builtin_pointer);
+ Add(builtin_index, builtin_index, IsolateData::builtin_entry_table_offset());
+ Ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -2723,7 +2729,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
Ldrsw(destination, field_operand);
- if (kUseBranchlessPtrDecompression) {
+ if (kUseBranchlessPtrDecompressionInGeneratedCode) {
UseScratchRegisterScope temps(this);
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
@@ -2747,7 +2753,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const Register& source) {
RecordComment("[ DecompressAnyTagged");
- if (kUseBranchlessPtrDecompression) {
+ if (kUseBranchlessPtrDecompressionInGeneratedCode) {
UseScratchRegisterScope temps(this);
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
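DecompressAnyTagged only needs to add the isolate root for heap-object pointers; Smis decompress by sign extension alone. The branchless form hinted at by the masked_root comment derives a 0-or-root mask from the tag bit. An illustrative sketch, assuming the usual tagging scheme (Smi tag 0 in the low bit, heap objects tagged 1) and a sign-extended 32-bit compressed value as produced by the Ldrsw above:

#include <cstdint>

// Sketch (not V8 code) of branchless tagged-pointer decompression.
uintptr_t DecompressAnyTaggedSketch(uintptr_t isolate_root, int32_t compressed) {
  uintptr_t value = static_cast<uintptr_t>(static_cast<intptr_t>(compressed));
  // 0 for Smis (low bit 0), all ones for heap objects (low bit 1).
  uintptr_t heap_object_mask = ~((value & 1) - 1);
  uintptr_t masked_root = isolate_root & heap_object_mask;
  return value + masked_root;  // Smi: unchanged; heap object: root + offset
}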
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index f217c3c586..d4e9c3055b 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -852,7 +852,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Generate an indirect call (for when a direct call's range is not adequate).
void IndirectCall(Address target, RelocInfo::Mode rmode);
- void CallBuiltinPointer(Register builtin_pointer) override;
+ // Load the builtin given by the Smi in |builtin_index| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
@@ -1920,17 +1923,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
class InstructionAccurateScope {
public:
explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
- : tasm_(tasm)
+ : tasm_(tasm),
+ block_pool_(tasm, count * kInstrSize)
#ifdef DEBUG
,
size_(count * kInstrSize)
#endif
{
- // Before blocking the const pool, see if it needs to be emitted.
- tasm_->CheckConstPool(false, true);
- tasm_->CheckVeneerPool(false, true);
-
- tasm_->StartBlockPools();
+ tasm_->CheckVeneerPool(false, true, count * kInstrSize);
+ tasm_->StartBlockVeneerPool();
#ifdef DEBUG
if (count != 0) {
tasm_->bind(&start_);
@@ -1941,7 +1942,7 @@ class InstructionAccurateScope {
}
~InstructionAccurateScope() {
- tasm_->EndBlockPools();
+ tasm_->EndBlockVeneerPool();
#ifdef DEBUG
if (start_.is_bound()) {
DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
@@ -1952,6 +1953,7 @@ class InstructionAccurateScope {
private:
TurboAssembler* tasm_;
+ TurboAssembler::BlockConstPoolScope block_pool_;
#ifdef DEBUG
size_t size_;
Label start_;
@@ -1979,7 +1981,7 @@ class UseScratchRegisterScope {
DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
}
- ~UseScratchRegisterScope();
+ V8_EXPORT_PRIVATE ~UseScratchRegisterScope();
// Take a register from the appropriate temps list. It will be returned
// automatically when the scope ends.
@@ -1993,10 +1995,11 @@ class UseScratchRegisterScope {
}
Register AcquireSameSizeAs(const Register& reg);
- VRegister AcquireSameSizeAs(const VRegister& reg);
+ V8_EXPORT_PRIVATE VRegister AcquireSameSizeAs(const VRegister& reg);
private:
- static CPURegister AcquireNextAvailable(CPURegList* available);
+ V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable(
+ CPURegList* available);
// Available scratch registers.
CPURegList* available_; // kRegister
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index b429786aa9..741866dfd6 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -559,8 +559,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 687ae98bfe..498afb0320 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -64,8 +64,8 @@ AssemblerOptions AssemblerOptions::Default(
// might be run on real hardware.
options.enable_simulator_code = !serializer;
#endif
- options.inline_offheap_trampolines =
- FLAG_embedded_builtins && !serializer && !generating_embedded_builtin;
+ options.inline_offheap_trampolines &=
+ !serializer && !generating_embedded_builtin;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
const base::AddressRegion& code_range =
isolate->heap()->memory_allocator()->code_range();
@@ -226,23 +226,33 @@ int AssemblerBase::AddCodeTarget(Handle<Code> target) {
}
}
-int AssemblerBase::AddCompressedEmbeddedObject(Handle<HeapObject> object) {
- int current = static_cast<int>(compressed_embedded_objects_.size());
- compressed_embedded_objects_.push_back(object);
- return current;
+Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
+ DCHECK_LT(static_cast<size_t>(code_target_index), code_targets_.size());
+ return code_targets_[code_target_index];
}
-Handle<HeapObject> AssemblerBase::GetCompressedEmbeddedObject(
- intptr_t index) const {
- DCHECK_LT(static_cast<size_t>(index), compressed_embedded_objects_.size());
- return compressed_embedded_objects_[index];
+AssemblerBase::EmbeddedObjectIndex AssemblerBase::AddEmbeddedObject(
+ Handle<HeapObject> object) {
+ EmbeddedObjectIndex current = embedded_objects_.size();
+ // Do not deduplicate invalid handles; they correspond to heap object requests.
+ if (!object.is_null()) {
+ auto entry = embedded_objects_map_.find(object);
+ if (entry != embedded_objects_map_.end()) {
+ return entry->second;
+ }
+ embedded_objects_map_[object] = current;
+ }
+ embedded_objects_.push_back(object);
+ return current;
}
-Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
- DCHECK_LT(static_cast<size_t>(code_target_index), code_targets_.size());
- return code_targets_[code_target_index];
+Handle<HeapObject> AssemblerBase::GetEmbeddedObject(
+ EmbeddedObjectIndex index) const {
+ DCHECK_LT(index, embedded_objects_.size());
+ return embedded_objects_[index];
}
+
int Assembler::WriteCodeComments() {
if (!FLAG_code_comments || code_comments_writer_.entry_count() == 0) return 0;
int offset = pc_offset();
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index eae5d53a4f..98639583d8 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -36,7 +36,9 @@
#define V8_CODEGEN_ASSEMBLER_H_
#include <forward_list>
+#include <unordered_map>
+#include "src/base/memory.h"
#include "src/codegen/code-comments.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference.h"
@@ -55,6 +57,10 @@ class ApiFunction;
namespace internal {
+using base::Memory;
+using base::ReadUnalignedValue;
+using base::WriteUnalignedValue;
+
// Forward declarations.
class EmbeddedData;
class InstructionStream;
@@ -155,7 +161,7 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
bool isolate_independent_code = false;
// Enables the use of isolate-independent builtins through an off-heap
// trampoline. (macro assembler feature).
- bool inline_offheap_trampolines = false;
+ bool inline_offheap_trampolines = FLAG_embedded_builtins;
// On some platforms, all code is within a given range in the process,
// and the start of this range is configured here.
Address code_range_start = 0;
@@ -272,8 +278,11 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int AddCodeTarget(Handle<Code> target);
Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
- int AddCompressedEmbeddedObject(Handle<HeapObject> object);
- Handle<HeapObject> GetCompressedEmbeddedObject(intptr_t index) const;
+ // Add 'object' to the {embedded_objects_} vector and return the index at
+ // which it is stored.
+ using EmbeddedObjectIndex = size_t;
+ EmbeddedObjectIndex AddEmbeddedObject(Handle<HeapObject> object);
+ Handle<HeapObject> GetEmbeddedObject(EmbeddedObjectIndex index) const;
// The buffer into which code and relocation info are generated.
std::unique_ptr<AssemblerBuffer> buffer_;
@@ -321,12 +330,18 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// the code handle in the vector instead.
std::vector<Handle<Code>> code_targets_;
- // When pointer compression is enabled, we need to store indexes to this
- // table in the code until we are ready to copy the code and embed the real
- // object pointers. We don't need to do the same thing for non-compressed
- // embedded objects, because we've got enough space (kPointerSize) in the
- // code stream to just embed the address of the object handle.
- std::vector<Handle<HeapObject>> compressed_embedded_objects_;
+ // If an assembler needs a small number to refer to a heap object handle
+ // (for example, because there are only 32bit available on a 64bit arch), the
+ // assembler adds the object into this vector using AddEmbeddedObject, and
+ // may then refer to the heap object using the handle's index in this vector.
+ std::vector<Handle<HeapObject>> embedded_objects_;
+
+ // Embedded objects are deduplicated based on handle location. This is a
+ // compromise that is almost as effective as deduplication based on actual
+ // heap object addresses while maintaining GC safety.
+ std::unordered_map<Handle<HeapObject>, EmbeddedObjectIndex,
+ Handle<HeapObject>::hash, Handle<HeapObject>::equal_to>
+ embedded_objects_map_;
const AssemblerOptions options_;
uint64_t enabled_cpu_features_;
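To make the embedded-object bookkeeping above easier to follow, here is a self-contained model of the deduplication done by AddEmbeddedObject; it is a sketch with a stand-in handle type, not V8's actual Handle or hashing machinery:

#include <cstddef>
#include <unordered_map>
#include <vector>

// Stand-in for Handle<HeapObject>: only the handle *location* matters,
// which is why deduplicating by that address stays GC-safe.
using FakeHandle = const void*;

struct EmbeddedObjectTable {
  std::vector<FakeHandle> objects;                     // embedded_objects_
  std::unordered_map<FakeHandle, std::size_t> by_loc;  // embedded_objects_map_

  std::size_t Add(FakeHandle object) {
    std::size_t current = objects.size();
    if (object != nullptr) {  // null models the "invalid handle" case
      auto it = by_loc.find(object);
      if (it != by_loc.end()) return it->second;  // reuse the existing index
      by_loc[object] = current;
    }
    objects.push_back(object);
    return current;
  }

  FakeHandle Get(std::size_t index) const { return objects.at(index); }
};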
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index d967d84874..390746c27d 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -63,57 +63,27 @@ void CodeStubAssembler::HandleBreakOnNode() {
void CodeStubAssembler::Assert(const BranchGenerator& branch,
const char* message, const char* file, int line,
- Node* extra_node1, const char* extra_node1_name,
- Node* extra_node2, const char* extra_node2_name,
- Node* extra_node3, const char* extra_node3_name,
- Node* extra_node4, const char* extra_node4_name,
- Node* extra_node5,
- const char* extra_node5_name) {
+ std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
if (FLAG_debug_code) {
- Check(branch, message, file, line, extra_node1, extra_node1_name,
- extra_node2, extra_node2_name, extra_node3, extra_node3_name,
- extra_node4, extra_node4_name, extra_node5, extra_node5_name);
+ Check(branch, message, file, line, extra_nodes);
}
#endif
}
void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
const char* message, const char* file, int line,
- Node* extra_node1, const char* extra_node1_name,
- Node* extra_node2, const char* extra_node2_name,
- Node* extra_node3, const char* extra_node3_name,
- Node* extra_node4, const char* extra_node4_name,
- Node* extra_node5,
- const char* extra_node5_name) {
+ std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
if (FLAG_debug_code) {
- Check(condition_body, message, file, line, extra_node1, extra_node1_name,
- extra_node2, extra_node2_name, extra_node3, extra_node3_name,
- extra_node4, extra_node4_name, extra_node5, extra_node5_name);
+ Check(condition_body, message, file, line, extra_nodes);
}
#endif
}
-#ifdef DEBUG
-namespace {
-void MaybePrintNodeWithName(CodeStubAssembler* csa, Node* node,
- const char* node_name) {
- if (node != nullptr) {
- csa->CallRuntime(Runtime::kPrintWithNameForAssert, csa->SmiConstant(0),
- csa->StringConstant(node_name), node);
- }
-}
-} // namespace
-#endif
-
void CodeStubAssembler::Check(const BranchGenerator& branch,
const char* message, const char* file, int line,
- Node* extra_node1, const char* extra_node1_name,
- Node* extra_node2, const char* extra_node2_name,
- Node* extra_node3, const char* extra_node3_name,
- Node* extra_node4, const char* extra_node4_name,
- Node* extra_node5, const char* extra_node5_name) {
+ std::initializer_list<ExtraNode> extra_nodes) {
Label ok(this);
Label not_ok(this, Label::kDeferred);
if (message != nullptr && FLAG_code_comments) {
@@ -124,9 +94,7 @@ void CodeStubAssembler::Check(const BranchGenerator& branch,
branch(&ok, &not_ok);
BIND(&not_ok);
- FailAssert(message, file, line, extra_node1, extra_node1_name, extra_node2,
- extra_node2_name, extra_node3, extra_node3_name, extra_node4,
- extra_node4_name, extra_node5, extra_node5_name);
+ FailAssert(message, file, line, extra_nodes);
BIND(&ok);
Comment("] Assert");
@@ -134,20 +102,14 @@ void CodeStubAssembler::Check(const BranchGenerator& branch,
void CodeStubAssembler::Check(const NodeGenerator& condition_body,
const char* message, const char* file, int line,
- Node* extra_node1, const char* extra_node1_name,
- Node* extra_node2, const char* extra_node2_name,
- Node* extra_node3, const char* extra_node3_name,
- Node* extra_node4, const char* extra_node4_name,
- Node* extra_node5, const char* extra_node5_name) {
+ std::initializer_list<ExtraNode> extra_nodes) {
BranchGenerator branch = [=](Label* ok, Label* not_ok) {
Node* condition = condition_body();
DCHECK_NOT_NULL(condition);
Branch(condition, ok, not_ok);
};
- Check(branch, message, file, line, extra_node1, extra_node1_name, extra_node2,
- extra_node2_name, extra_node3, extra_node3_name, extra_node4,
- extra_node4_name, extra_node5, extra_node5_name);
+ Check(branch, message, file, line, extra_nodes);
}
void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
@@ -162,31 +124,25 @@ void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
}
void CodeStubAssembler::FailAssert(
- const char* message, const char* file, int line, Node* extra_node1,
- const char* extra_node1_name, Node* extra_node2,
- const char* extra_node2_name, Node* extra_node3,
- const char* extra_node3_name, Node* extra_node4,
- const char* extra_node4_name, Node* extra_node5,
- const char* extra_node5_name) {
+ const char* message, const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes) {
DCHECK_NOT_NULL(message);
EmbeddedVector<char, 1024> chars;
if (file != nullptr) {
- SNPrintF(chars, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
- } else {
- SNPrintF(chars, "CSA_ASSERT failed: %s\n", message);
+ SNPrintF(chars, "%s [%s:%d]", message, file, line);
+ message = chars.begin();
}
- Node* message_node = StringConstant(chars.begin());
+ Node* message_node = StringConstant(message);
#ifdef DEBUG
// Only print the extra nodes in debug builds.
- MaybePrintNodeWithName(this, extra_node1, extra_node1_name);
- MaybePrintNodeWithName(this, extra_node2, extra_node2_name);
- MaybePrintNodeWithName(this, extra_node3, extra_node3_name);
- MaybePrintNodeWithName(this, extra_node4, extra_node4_name);
- MaybePrintNodeWithName(this, extra_node5, extra_node5_name);
+ for (auto& node : extra_nodes) {
+ CallRuntime(Runtime::kPrintWithNameForAssert, SmiConstant(0),
+ StringConstant(node.second), node.first);
+ }
#endif
- DebugAbort(message_node);
+ AbortCSAAssert(message_node);
Unreachable();
}
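For orientation, the refactored Check/FailAssert entry points above now take the extra debug nodes as a single brace-enclosed list of (node, name) pairs instead of five separate parameter slots. A hypothetical call site (identifiers are illustrative, not from this patch):

// Sketch only: both extra nodes are printed in debug builds before aborting.
FailAssert("array length mismatch", __FILE__, __LINE__,
           {{array, "array"}, {length, "length"}});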
@@ -567,7 +523,7 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) {
if (SmiValuesAre31Bits() && kSystemPointerSize == kInt64Size) {
// Check that the Smi value is properly sign-extended.
- TNode<IntPtrT> value = Signed(BitcastTaggedToWord(smi));
+ TNode<IntPtrT> value = Signed(BitcastTaggedSignedToWord(smi));
return WordEqual(value, ChangeInt32ToIntPtr(TruncateIntPtrToInt32(value)));
}
return Int32TrueConstant();
@@ -611,7 +567,8 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
if (ToIntPtrConstant(value, constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
}
- return Signed(WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()));
+ return Signed(
+ WordSar(BitcastTaggedSignedToWord(value), SmiShiftBitsConstant()));
}
TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
@@ -660,13 +617,14 @@ TNode<Int32T> CodeStubAssembler::TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b,
TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
- return BitcastWordToTaggedSigned(TryIntPtrAdd(
- BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs), if_overflow));
+ return BitcastWordToTaggedSigned(
+ TryIntPtrAdd(BitcastTaggedSignedToWord(lhs),
+ BitcastTaggedSignedToWord(rhs), if_overflow));
} else {
DCHECK(SmiValuesAre31Bits());
- TNode<PairT<Int32T, BoolT>> pair =
- Int32AddWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)),
- TruncateIntPtrToInt32(BitcastTaggedToWord(rhs)));
+ TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)),
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs)));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<Int32T> result = Projection<0>(pair);
@@ -678,16 +636,16 @@ TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(
- BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs));
+ BitcastTaggedSignedToWord(lhs), BitcastTaggedSignedToWord(rhs));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<IntPtrT> result = Projection<0>(pair);
return BitcastWordToTaggedSigned(result);
} else {
DCHECK(SmiValuesAre31Bits());
- TNode<PairT<Int32T, BoolT>> pair =
- Int32SubWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)),
- TruncateIntPtrToInt32(BitcastTaggedToWord(rhs)));
+ TNode<PairT<Int32T, BoolT>> pair = Int32SubWithOverflow(
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)),
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs)));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<Int32T> result = Projection<0>(pair);
@@ -933,7 +891,7 @@ TNode<Smi> CodeStubAssembler::TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor,
BIND(&divisor_is_not_minus_one);
TNode<Int32T> untagged_result = Int32Div(untagged_dividend, untagged_divisor);
- TNode<Int32T> truncated = Signed(Int32Mul(untagged_result, untagged_divisor));
+ TNode<Int32T> truncated = Int32Mul(untagged_result, untagged_divisor);
// Do floating point division if the remainder is not 0.
GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout);
@@ -973,9 +931,12 @@ TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) {
}
TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) {
- return WordNotEqual(
- WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
- IntPtrConstant(0));
+ // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we
+ // can nonetheless use it to inspect the Smi tag. The assumption here is that
+ // the GC will not exchange Smis for HeapObjects or vice-versa.
+ TNode<IntPtrT> a_bitcast = BitcastTaggedSignedToWord(UncheckedCast<Smi>(a));
+ return WordNotEqual(WordAnd(a_bitcast, IntPtrConstant(kSmiTagMask)),
+ IntPtrConstant(0));
}
TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
@@ -1031,7 +992,7 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
TNode<Int32T> prototype_instance_type = LoadMapInstanceType(prototype_map);
// Pessimistically assume elements if a Proxy, Special API Object,
- // or JSValue wrapper is found on the prototype chain. After this
+ // or JSPrimitiveWrapper wrapper is found on the prototype chain. After this
// instance type check, it's not necessary to check for interceptors or
// access checks.
Label if_custom(this, Label::kDeferred), if_notcustom(this);
@@ -1040,11 +1001,12 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
BIND(&if_custom);
{
- // For string JSValue wrappers we still support the checks as long
- // as they wrap the empty string.
- GotoIfNot(InstanceTypeEqual(prototype_instance_type, JS_VALUE_TYPE),
- possibly_elements);
- Node* prototype_value = LoadJSValueValue(prototype);
+ // For string JSPrimitiveWrapper wrappers we still support the checks as
+ // long as they wrap the empty string.
+ GotoIfNot(
+ InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE),
+ possibly_elements);
+ Node* prototype_value = LoadJSPrimitiveWrapperValue(prototype);
Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements);
}
@@ -1121,20 +1083,23 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this);
bool needs_double_alignment = flags & kDoubleAlignment;
+ bool allow_large_object_allocation = flags & kAllowLargeObjectAllocation;
- if (flags & kAllowLargeObjectAllocation) {
+ if (allow_large_object_allocation) {
Label next(this);
GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);
+ TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
+ AllocateDoubleAlignFlag::encode(needs_double_alignment) |
+ AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
if (FLAG_young_generation_large_objects) {
- result = CallRuntime(Runtime::kAllocateInYoungGeneration,
- NoContextConstant(), SmiTag(size_in_bytes));
+ result =
+ CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
+ SmiTag(size_in_bytes), runtime_flags);
} else {
- TNode<Smi> alignment_flag = SmiConstant(Smi::FromInt(
- AllocateDoubleAlignFlag::encode(needs_double_alignment)));
result =
CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
- SmiTag(size_in_bytes), alignment_flag);
+ SmiTag(size_in_bytes), runtime_flags);
}
Goto(&out);
@@ -1161,15 +1126,17 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
BIND(&runtime_call);
{
+ TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
+ AllocateDoubleAlignFlag::encode(needs_double_alignment) |
+ AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
if (flags & kPretenured) {
- TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
- AllocateDoubleAlignFlag::encode(needs_double_alignment)));
result =
CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
} else {
- result = CallRuntime(Runtime::kAllocateInYoungGeneration,
- NoContextConstant(), SmiTag(size_in_bytes));
+ result =
+ CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
+ SmiTag(size_in_bytes), runtime_flags);
}
Goto(&out);
}
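Both allocation paths above now pass the same runtime_flags Smi, which packs the double-alignment and large-object bits together. A self-contained model of that packing (bit positions are illustrative; the real layout is defined by the AllocateDoubleAlignFlag and AllowLargeObjectAllocationFlag bit fields):

#include <cassert>

int main() {
  constexpr int kDoubleAlignBit = 1 << 0;       // illustrative position
  constexpr int kAllowLargeObjectBit = 1 << 1;  // illustrative position

  const bool needs_double_alignment = false;
  const bool allow_large_object_allocation = true;

  const int runtime_flags =
      (needs_double_alignment ? kDoubleAlignBit : 0) |
      (allow_large_object_allocation ? kAllowLargeObjectBit : 0);

  assert(runtime_flags == kAllowLargeObjectBit);
  return 0;
}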
@@ -1394,14 +1361,15 @@ Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
int offset, MachineType type) {
CSA_ASSERT(this, IsStrong(object));
- return Load(type, object, IntPtrConstant(offset - kHeapObjectTag));
+ return LoadFromObject(type, object, IntPtrConstant(offset - kHeapObjectTag));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
SloppyTNode<IntPtrT> offset,
MachineType type) {
CSA_ASSERT(this, IsStrong(object));
- return Load(type, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
+ return LoadFromObject(type, object,
+ IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
@@ -1469,12 +1437,18 @@ TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
object, HeapNumber::kValueOffset, MachineType::Float64()));
}
+TNode<Map> CodeStubAssembler::GetStructMap(InstanceType instance_type) {
+ Handle<Map> map_handle(Map::GetStructMap(isolate(), instance_type),
+ isolate());
+ return HeapConstant(map_handle);
+}
+
TNode<Map> CodeStubAssembler::LoadMap(SloppyTNode<HeapObject> object) {
return UncheckedCast<Map>(LoadObjectField(object, HeapObject::kMapOffset,
MachineType::TaggedPointer()));
}
-TNode<Int32T> CodeStubAssembler::LoadInstanceType(
+TNode<Uint16T> CodeStubAssembler::LoadInstanceType(
SloppyTNode<HeapObject> object) {
return LoadMapInstanceType(LoadMap(object));
}
@@ -1591,8 +1565,8 @@ TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(SloppyTNode<Map> map) {
LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32()));
}
-TNode<Int32T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) {
- return UncheckedCast<Int32T>(
+TNode<Uint16T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) {
+ return UncheckedCast<Uint16T>(
LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint16()));
}
@@ -1700,12 +1674,10 @@ TNode<Object> CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
TNode<Uint32T> CodeStubAssembler::EnsureOnlyHasSimpleProperties(
TNode<Map> map, TNode<Int32T> instance_type, Label* bailout) {
- // This check can have false positives, since it applies to any JSValueType.
+ // This check can have false positives, since it applies to any
+ // JSPrimitiveWrapper type.
GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout);
- GotoIf(IsSetWord32(LoadMapBitField2(map), Map::HasHiddenPrototypeBit::kMask),
- bailout);
-
TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask), bailout);
@@ -1810,9 +1782,9 @@ Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) {
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
-Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
- CSA_ASSERT(this, IsJSValue(object));
- return LoadObjectField(object, JSValue::kValueOffset);
+Node* CodeStubAssembler::LoadJSPrimitiveWrapperValue(Node* object) {
+ CSA_ASSERT(this, IsJSPrimitiveWrapper(object));
+ return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset);
}
void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
@@ -1941,11 +1913,13 @@ TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(
return LoadAndUntagWeakFixedArrayLength(array);
}
-template <typename Array>
-TNode<MaybeObject> CodeStubAssembler::LoadArrayElement(
- TNode<Array> array, int array_header_size, Node* index_node,
- int additional_offset, ParameterMode parameter_mode,
- LoadSensitivity needs_poisoning) {
+template <typename Array, typename T>
+TNode<T> CodeStubAssembler::LoadArrayElement(TNode<Array> array,
+ int array_header_size,
+ Node* index_node,
+ int additional_offset,
+ ParameterMode parameter_mode,
+ LoadSensitivity needs_poisoning) {
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
ParameterToIntPtr(index_node, parameter_mode),
IntPtrConstant(0)));
@@ -1955,8 +1929,13 @@ TNode<MaybeObject> CodeStubAssembler::LoadArrayElement(
parameter_mode, header_size);
CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array),
array_header_size));
- return UncheckedCast<MaybeObject>(
- Load(MachineType::AnyTagged(), array, offset, needs_poisoning));
+ constexpr MachineType machine_type = MachineTypeOf<T>::value;
+ // TODO(gsps): Remove the Load case once LoadFromObject supports poisoning
+ if (needs_poisoning == LoadSensitivity::kSafe) {
+ return UncheckedCast<T>(LoadFromObject(machine_type, array, offset));
+ } else {
+ return UncheckedCast<T>(Load(machine_type, array, offset, needs_poisoning));
+ }
}
template TNode<MaybeObject>
@@ -2046,8 +2025,8 @@ TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayBackingStore(
IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
}
-Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
- Node* data_pointer, Node* offset) {
+TNode<BigInt> CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
+ SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) {
if (Is64()) {
TNode<IntPtrT> value = UncheckedCast<IntPtrT>(
Load(MachineType::IntPtr(), data_pointer, offset));
@@ -2059,13 +2038,15 @@ Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<IntPtrT> low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kSystemPointerSize))));
+ Int32Add(TruncateIntPtrToInt32(offset),
+ Int32Constant(kSystemPointerSize))));
#else
TNode<IntPtrT> low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<IntPtrT> high = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kSystemPointerSize))));
+ Int32Add(TruncateIntPtrToInt32(offset),
+ Int32Constant(kSystemPointerSize))));
#endif
return BigIntFromInt32Pair(low, high);
}
@@ -2176,8 +2157,9 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) {
return var_result.value();
}
-Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
- Node* data_pointer, Node* offset) {
+compiler::TNode<BigInt>
+CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
+ SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) {
Label if_zero(this), done(this);
if (Is64()) {
TNode<UintPtrT> value = UncheckedCast<UintPtrT>(
@@ -2190,13 +2172,15 @@ Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kSystemPointerSize))));
+ Int32Add(TruncateIntPtrToInt32(offset),
+ Int32Constant(kSystemPointerSize))));
#else
TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<UintPtrT> high = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kSystemPointerSize))));
+ Int32Add(TruncateIntPtrToInt32(offset),
+ Int32Constant(kSystemPointerSize))));
#endif
return BigIntFromUint32Pair(low, high);
}
@@ -2244,10 +2228,10 @@ TNode<BigInt> CodeStubAssembler::BigIntFromUint64(TNode<UintPtrT> value) {
return var_result.value();
}
-Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
- Node* data_pointer, Node* index_node, ElementsKind elements_kind,
+TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
+ TNode<RawPtrT> data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode) {
- Node* offset =
+ TNode<IntPtrT> offset =
ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
switch (elements_kind) {
case UINT8_ELEMENTS: /* fall through */
@@ -2281,7 +2265,8 @@ Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
}
TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
- TNode<WordT> data_pointer, TNode<Smi> index, TNode<Int32T> elements_kind) {
+ TNode<RawPtrT> data_pointer, TNode<Smi> index,
+ TNode<Int32T> elements_kind) {
TVARIABLE(Numeric, var_result);
Label done(this), if_unknown_type(this, Label::kDeferred);
int32_t elements_kinds[] = {
@@ -2307,12 +2292,12 @@ TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
BIND(&if_unknown_type);
Unreachable();
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- BIND(&if_##type##array); \
- { \
- var_result = CAST(LoadFixedTypedArrayElementAsTagged( \
- data_pointer, index, TYPE##_ELEMENTS, SMI_PARAMETERS)); \
- Goto(&done); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ BIND(&if_##type##array); \
+ { \
+ var_result = LoadFixedTypedArrayElementAsTagged( \
+ data_pointer, index, TYPE##_ELEMENTS, SMI_PARAMETERS); \
+ Goto(&done); \
}
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -2323,8 +2308,7 @@ TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
void CodeStubAssembler::StoreJSTypedArrayElementFromTagged(
TNode<Context> context, TNode<JSTypedArray> typed_array,
- TNode<Object> index_node, TNode<Object> value, ElementsKind elements_kind,
- ParameterMode parameter_mode) {
+ TNode<Smi> index_node, TNode<Object> value, ElementsKind elements_kind) {
TNode<RawPtrT> data_pointer = LoadJSTypedArrayBackingStore(typed_array);
switch (elements_kind) {
case UINT8_ELEMENTS:
@@ -2333,26 +2317,26 @@ void CodeStubAssembler::StoreJSTypedArrayElementFromTagged(
case UINT16_ELEMENTS:
case INT16_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
- SmiToInt32(CAST(value)), parameter_mode);
+ SmiToInt32(CAST(value)), SMI_PARAMETERS);
break;
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
- TruncateTaggedToWord32(context, value), parameter_mode);
+ TruncateTaggedToWord32(context, value), SMI_PARAMETERS);
break;
case FLOAT32_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))),
- parameter_mode);
+ SMI_PARAMETERS);
break;
case FLOAT64_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
- LoadHeapNumberValue(CAST(value)), parameter_mode);
+ LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS);
break;
case BIGUINT64_ELEMENTS:
case BIGINT64_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
- UncheckedCast<BigInt>(value), parameter_mode);
+ UncheckedCast<BigInt>(value), SMI_PARAMETERS);
break;
default:
UNREACHABLE();
@@ -2925,15 +2909,12 @@ TNode<Int32T> CodeStubAssembler::EnsureArrayPushable(TNode<Map> map,
// Disallow pushing onto prototypes. It might be the JSArray prototype.
// Disallow pushing onto non-extensible objects.
Comment("Disallow pushing onto prototypes");
- Node* bit_field2 = LoadMapBitField2(map);
- int mask = Map::IsPrototypeMapBit::kMask | Map::IsExtensibleBit::kMask;
- Node* test = Word32And(bit_field2, Int32Constant(mask));
- GotoIf(Word32NotEqual(test, Int32Constant(Map::IsExtensibleBit::kMask)),
- bailout);
+ GotoIfNot(IsExtensibleNonPrototypeMap(map), bailout);
EnsureArrayLengthWritable(map, bailout);
- TNode<Uint32T> kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
+ TNode<Uint32T> kind =
+ DecodeWord32<Map::ElementsKindBits>(LoadMapBitField2(map));
return Signed(kind);
}
@@ -3022,7 +3003,7 @@ void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
GotoIfNotNumber(value, bailout);
}
if (IsDoubleElementsKind(kind)) {
- value = ChangeNumberToFloat64(value);
+ value = ChangeNumberToFloat64(CAST(value));
}
StoreElement(elements, kind, index, value, mode);
}
@@ -3131,14 +3112,10 @@ TNode<BigInt> CodeStubAssembler::AllocateBigInt(TNode<IntPtrT> length) {
}
TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
- // This is currently used only for 64-bit wide BigInts. If more general
- // applicability is required, a large-object check must be added.
- CSA_ASSERT(this, UintPtrLessThan(length, IntPtrConstant(3)));
-
TNode<IntPtrT> size =
IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
Signed(WordShl(length, kSystemPointerSizeLog2)));
- Node* raw_result = Allocate(size, kNone);
+ Node* raw_result = Allocate(size, kAllowLargeObjectAllocation);
StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap);
if (FIELD_SIZE(BigInt::kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(BigInt::kOptionalPaddingOffset));
@@ -3155,11 +3132,26 @@ void CodeStubAssembler::StoreBigIntBitfield(TNode<BigInt> bigint,
MachineRepresentation::kWord32);
}
-void CodeStubAssembler::StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
+void CodeStubAssembler::StoreBigIntDigit(TNode<BigInt> bigint,
+ intptr_t digit_index,
TNode<UintPtrT> digit) {
+ CHECK_LE(0, digit_index);
+ CHECK_LT(digit_index, BigInt::kMaxLength);
StoreObjectFieldNoWriteBarrier(
- bigint, BigInt::kDigitsOffset + digit_index * kSystemPointerSize, digit,
- UintPtrT::kMachineRepresentation);
+ bigint,
+ BigInt::kDigitsOffset +
+ static_cast<int>(digit_index) * kSystemPointerSize,
+ digit, UintPtrT::kMachineRepresentation);
+}
+
+void CodeStubAssembler::StoreBigIntDigit(TNode<BigInt> bigint,
+ TNode<IntPtrT> digit_index,
+ TNode<UintPtrT> digit) {
+ TNode<IntPtrT> offset =
+ IntPtrAdd(IntPtrConstant(BigInt::kDigitsOffset),
+ IntPtrMul(digit_index, IntPtrConstant(kSystemPointerSize)));
+ StoreObjectFieldNoWriteBarrier(bigint, offset, digit,
+ UintPtrT::kMachineRepresentation);
}
TNode<Word32T> CodeStubAssembler::LoadBigIntBitfield(TNode<BigInt> bigint) {
@@ -3168,10 +3160,23 @@ TNode<Word32T> CodeStubAssembler::LoadBigIntBitfield(TNode<BigInt> bigint) {
}
TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
- int digit_index) {
- return UncheckedCast<UintPtrT>(LoadObjectField(
- bigint, BigInt::kDigitsOffset + digit_index * kSystemPointerSize,
- MachineType::UintPtr()));
+ intptr_t digit_index) {
+ CHECK_LE(0, digit_index);
+ CHECK_LT(digit_index, BigInt::kMaxLength);
+ return UncheckedCast<UintPtrT>(
+ LoadObjectField(bigint,
+ BigInt::kDigitsOffset +
+ static_cast<int>(digit_index) * kSystemPointerSize,
+ MachineType::UintPtr()));
+}
+
+TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
+ TNode<IntPtrT> digit_index) {
+ TNode<IntPtrT> offset =
+ IntPtrAdd(IntPtrConstant(BigInt::kDigitsOffset),
+ IntPtrMul(digit_index, IntPtrConstant(kSystemPointerSize)));
+ return UncheckedCast<UintPtrT>(
+ LoadObjectField(bigint, offset, MachineType::UintPtr()));
}
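The LoadBigIntDigit/StoreBigIntDigit overloads above compute the same byte offset, kDigitsOffset + digit_index * kSystemPointerSize; the intptr_t version folds it into a constant while the TNode<IntPtrT> version emits IntPtrAdd/IntPtrMul. A quick standalone check of that arithmetic (the constants are illustrative placeholders, not V8's definitions):

#include <cassert>

int main() {
  constexpr long kSystemPointerSize = 8;  // illustrative 64-bit target
  constexpr long kDigitsOffset = 16;      // illustrative header size
  assert(kDigitsOffset + 0 * kSystemPointerSize == 16);  // digit 0
  assert(kDigitsOffset + 3 * kSystemPointerSize == 40);  // digit 3
  return 0;
}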
TNode<ByteArray> CodeStubAssembler::AllocateByteArray(TNode<UintPtrT> length,
@@ -3440,16 +3445,16 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
}
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
- TNode<IntPtrT> at_least_space_for) {
+ TNode<IntPtrT> at_least_space_for, AllocationFlags flags) {
CSA_ASSERT(this, UintPtrLessThanOrEqual(
at_least_space_for,
IntPtrConstant(NameDictionary::kMaxCapacity)));
TNode<IntPtrT> capacity = HashTableComputeCapacity(at_least_space_for);
- return AllocateNameDictionaryWithCapacity(capacity);
+ return AllocateNameDictionaryWithCapacity(capacity, flags);
}
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
- TNode<IntPtrT> capacity) {
+ TNode<IntPtrT> capacity, AllocationFlags flags) {
CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
TNode<IntPtrT> length = EntryToIndex<NameDictionary>(capacity);
@@ -3457,39 +3462,51 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
TimesTaggedSize(length), IntPtrConstant(NameDictionary::kHeaderSize));
TNode<NameDictionary> result =
- UncheckedCast<NameDictionary>(AllocateInNewSpace(store_size));
- Comment("Initialize NameDictionary");
+ UncheckedCast<NameDictionary>(Allocate(store_size, flags));
+
// Initialize FixedArray fields.
- DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kNameDictionaryMap));
- StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap);
- StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
- SmiFromIntPtr(length));
+ {
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kNameDictionaryMap));
+ StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap);
+ StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
+ SmiFromIntPtr(length));
+ }
+
// Initialized HashTable fields.
- TNode<Smi> zero = SmiConstant(0);
- StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
- SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(result, NameDictionary::kNumberOfDeletedElementsIndex,
- zero, SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(result, NameDictionary::kCapacityIndex,
- SmiTag(capacity), SKIP_WRITE_BARRIER);
- // Initialize Dictionary fields.
- TNode<HeapObject> filler = UndefinedConstant();
- StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
- SmiConstant(PropertyDetails::kInitialIndex),
- SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(result, NameDictionary::kObjectHashIndex,
- SmiConstant(PropertyArray::kNoHashSentinel),
- SKIP_WRITE_BARRIER);
+ {
+ TNode<Smi> zero = SmiConstant(0);
+ StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result,
+ NameDictionary::kNumberOfDeletedElementsIndex, zero,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kCapacityIndex,
+ SmiTag(capacity), SKIP_WRITE_BARRIER);
+ // Initialize Dictionary fields.
+ StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
+ SmiConstant(PropertyDetails::kInitialIndex),
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kObjectHashIndex,
+ SmiConstant(PropertyArray::kNoHashSentinel),
+ SKIP_WRITE_BARRIER);
+ }
// Initialize NameDictionary elements.
- TNode<WordT> result_word = BitcastTaggedToWord(result);
- TNode<WordT> start_address = IntPtrAdd(
- result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt(
- NameDictionary::kElementsStartIndex) -
- kHeapObjectTag));
- TNode<WordT> end_address = IntPtrAdd(
- result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag)));
- StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+ {
+ TNode<WordT> result_word = BitcastTaggedToWord(result);
+ TNode<WordT> start_address = IntPtrAdd(
+ result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt(
+ NameDictionary::kElementsStartIndex) -
+ kHeapObjectTag));
+ TNode<WordT> end_address = IntPtrAdd(
+ result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag)));
+
+ TNode<HeapObject> filler = UndefinedConstant();
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kUndefinedValue));
+
+ StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+ }
+
return result;
}
@@ -3605,6 +3622,17 @@ TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map);
TNode<CollectionType> table = UncheckedCast<CollectionType>(table_obj);
+ {
+ // This store overlaps with the header fields stored below.
+ // Since it happens first, it effectively still just zero-initializes the
+ // padding.
+ constexpr int offset =
+ RoundDown<kTaggedSize>(CollectionType::PaddingOffset());
+ STATIC_ASSERT(offset + kTaggedSize == CollectionType::PaddingOffset() +
+ CollectionType::PaddingSize());
+ StoreObjectFieldNoWriteBarrier(table, offset, SmiConstant(0));
+ }
+
// Initialize the SmallOrderedHashTable fields.
StoreObjectByteNoWriteBarrier(
table, CollectionType::NumberOfBucketsOffset(),
@@ -3748,8 +3776,9 @@ void CodeStubAssembler::InitializeStructBody(Node* object, Node* map,
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
}
-Node* CodeStubAssembler::AllocateJSObjectFromMap(
- Node* map, Node* properties, Node* elements, AllocationFlags flags,
+TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
+ SloppyTNode<Map> map, SloppyTNode<HeapObject> properties,
+ SloppyTNode<FixedArray> elements, AllocationFlags flags,
SlackTrackingMode slack_tracking_mode) {
CSA_ASSERT(this, IsMap(map));
CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map)));
@@ -3761,7 +3790,7 @@ Node* CodeStubAssembler::AllocateJSObjectFromMap(
StoreMapNoWriteBarrier(object, map);
InitializeJSObjectFromMap(object, map, instance_size, properties, elements,
slack_tracking_mode);
- return object;
+ return CAST(object);
}
void CodeStubAssembler::InitializeJSObjectFromMap(
@@ -5508,7 +5537,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
}
BIND(&is_heap_number);
- var_word32->Bind(TruncateHeapNumberValueToWord32(value));
+ var_word32->Bind(TruncateHeapNumberValueToWord32(CAST(value)));
CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
Goto(if_number);
@@ -5521,9 +5550,10 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
}
}
-Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
+TNode<Int32T> CodeStubAssembler::TruncateHeapNumberValueToWord32(
+ TNode<HeapNumber> object) {
Node* value = LoadHeapNumberValue(object);
- return TruncateFloat64ToWord32(value);
+ return Signed(TruncateFloat64ToWord32(value));
}
void CodeStubAssembler::TryHeapNumberToSmi(TNode<HeapNumber> number,
@@ -5731,10 +5761,7 @@ TNode<Uint32T> CodeStubAssembler::ChangeNumberToUint32(TNode<Number> value) {
return var_result.value();
}
-TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(
- SloppyTNode<Number> value) {
- // TODO(tebbi): Remove assert once argument is TNode instead of SloppyTNode.
- CSA_SLOW_ASSERT(this, IsNumber(value));
+TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(TNode<Number> value) {
TVARIABLE(Float64T, result);
Label smi(this);
Label done(this, &result);
@@ -5795,43 +5822,43 @@ TNode<WordT> CodeStubAssembler::TimesDoubleSize(SloppyTNode<WordT> value) {
return WordShl(value, kDoubleSizeLog2);
}
-Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
- PrimitiveType primitive_type,
- char const* method_name) {
- // We might need to loop once due to JSValue unboxing.
- VARIABLE(var_value, MachineRepresentation::kTagged, value);
+TNode<Object> CodeStubAssembler::ToThisValue(TNode<Context> context,
+ TNode<Object> value,
+ PrimitiveType primitive_type,
+ char const* method_name) {
+ // We might need to loop once due to JSPrimitiveWrapper unboxing.
+ TVARIABLE(Object, var_value, value);
Label loop(this, &var_value), done_loop(this),
done_throw(this, Label::kDeferred);
Goto(&loop);
BIND(&loop);
{
- // Load the current {value}.
- value = var_value.value();
-
// Check if the {value} is a Smi or a HeapObject.
- GotoIf(TaggedIsSmi(value), (primitive_type == PrimitiveType::kNumber)
- ? &done_loop
- : &done_throw);
+ GotoIf(
+ TaggedIsSmi(var_value.value()),
+ (primitive_type == PrimitiveType::kNumber) ? &done_loop : &done_throw);
+
+ TNode<HeapObject> value = CAST(var_value.value());
// Load the map of the {value}.
- Node* value_map = LoadMap(value);
+ TNode<Map> value_map = LoadMap(value);
// Load the instance type of the {value}.
- Node* value_instance_type = LoadMapInstanceType(value_map);
+ TNode<Uint16T> value_instance_type = LoadMapInstanceType(value_map);
- // Check if {value} is a JSValue.
- Label if_valueisvalue(this, Label::kDeferred), if_valueisnotvalue(this);
- Branch(InstanceTypeEqual(value_instance_type, JS_VALUE_TYPE),
- &if_valueisvalue, &if_valueisnotvalue);
+ // Check if {value} is a JSPrimitiveWrapper.
+ Label if_valueiswrapper(this, Label::kDeferred), if_valueisnotwrapper(this);
+ Branch(InstanceTypeEqual(value_instance_type, JS_PRIMITIVE_WRAPPER_TYPE),
+ &if_valueiswrapper, &if_valueisnotwrapper);
- BIND(&if_valueisvalue);
+ BIND(&if_valueiswrapper);
{
// Load the actual value from the {value}.
- var_value.Bind(LoadObjectField(value, JSValue::kValueOffset));
+ var_value = LoadObjectField(value, JSPrimitiveWrapper::kValueOffset);
Goto(&loop);
}
- BIND(&if_valueisnotvalue);
+ BIND(&if_valueisnotwrapper);
{
switch (primitive_type) {
case PrimitiveType::kBoolean:
@@ -5988,13 +6015,12 @@ TNode<BoolT> CodeStubAssembler::InstanceTypeEqual(
TNode<BoolT> CodeStubAssembler::IsDictionaryMap(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- Node* bit_field3 = LoadMapBitField3(map);
- return IsSetWord32<Map::IsDictionaryMapBit>(bit_field3);
+ return IsSetWord32<Map::IsDictionaryMapBit>(LoadMapBitField3(map));
}
TNode<BoolT> CodeStubAssembler::IsExtensibleMap(SloppyTNode<Map> map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32<Map::IsExtensibleBit>(LoadMapBitField2(map));
+ return IsSetWord32<Map::IsExtensibleBit>(LoadMapBitField3(map));
}
TNode<BoolT> CodeStubAssembler::IsFrozenOrSealedElementsKindMap(
@@ -6007,7 +6033,7 @@ TNode<BoolT> CodeStubAssembler::IsFrozenOrSealedElementsKindMap(
TNode<BoolT> CodeStubAssembler::IsExtensibleNonPrototypeMap(TNode<Map> map) {
int kMask = Map::IsExtensibleBit::kMask | Map::IsPrototypeMapBit::kMask;
int kExpected = Map::IsExtensibleBit::kMask;
- return Word32Equal(Word32And(LoadMapBitField2(map), Int32Constant(kMask)),
+ return Word32Equal(Word32And(LoadMapBitField3(map), Int32Constant(kMask)),
Int32Constant(kExpected));
}
@@ -6072,10 +6098,13 @@ TNode<BoolT> CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() {
return WordEqual(cell_value, invalid);
}
-TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid() {
- Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(RootIndex::kRegExpSpeciesProtector);
- Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid(
+ TNode<Context> native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ TNode<PropertyCell> cell = CAST(LoadContextElement(
+ native_context, Context::REGEXP_SPECIES_PROTECTOR_INDEX));
+ TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
return WordEqual(cell_value, invalid);
}
@@ -6270,6 +6299,15 @@ TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyInstanceType(
return InstanceTypeEqual(instance_type, JS_GLOBAL_PROXY_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyMap(SloppyTNode<Map> map) {
+ return IsJSGlobalProxyInstanceType(LoadMapInstanceType(map));
+}
+
+TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(
+ SloppyTNode<HeapObject> object) {
+ return IsJSGlobalProxyMap(LoadMap(object));
+}
+
TNode<BoolT> CodeStubAssembler::IsJSObjectInstanceType(
SloppyTNode<Int32T> instance_type) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
@@ -6304,26 +6342,22 @@ TNode<BoolT> CodeStubAssembler::IsJSStringIterator(
return HasInstanceType(object, JS_STRING_ITERATOR_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(
- SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, JS_GLOBAL_PROXY_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsMap(SloppyTNode<HeapObject> map) {
return IsMetaMap(LoadMap(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSValueInstanceType(
+TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperInstanceType(
SloppyTNode<Int32T> instance_type) {
- return InstanceTypeEqual(instance_type, JS_VALUE_TYPE);
+ return InstanceTypeEqual(instance_type, JS_PRIMITIVE_WRAPPER_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSValue(SloppyTNode<HeapObject> object) {
- return IsJSValueMap(LoadMap(object));
+TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapper(
+ SloppyTNode<HeapObject> object) {
+ return IsJSPrimitiveWrapperMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSValueMap(SloppyTNode<Map> map) {
- return IsJSValueInstanceType(LoadMapInstanceType(map));
+TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode<Map> map) {
+ return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map));
}
TNode<BoolT> CodeStubAssembler::IsJSArrayInstanceType(
@@ -6420,7 +6454,7 @@ TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKind(
if (IsDoubleElementsKind(kind)) {
return IsFixedDoubleArray(object);
} else {
- DCHECK(IsSmiOrObjectElementsKind(kind));
+ DCHECK(IsSmiOrObjectElementsKind(kind) || IsSealedElementsKind(kind));
return IsFixedArraySubclass(object);
}
}
@@ -6562,6 +6596,11 @@ TNode<BoolT> CodeStubAssembler::IsPrivateSymbol(
[=] { return Int32FalseConstant(); });
}
+TNode<BoolT> CodeStubAssembler::IsPrivateName(SloppyTNode<Symbol> symbol) {
+ TNode<Uint32T> flags = LoadObjectField<Uint32T>(symbol, Symbol::kFlagsOffset);
+ return IsSetWord32<Symbol::IsPrivateNameBit>(flags);
+}
+
TNode<BoolT> CodeStubAssembler::IsNativeContext(
SloppyTNode<HeapObject> object) {
return WordEqual(LoadMap(object), LoadRoot(RootIndex::kNativeContextMap));
@@ -6769,7 +6808,7 @@ TNode<BoolT> CodeStubAssembler::IsHeapNumberUint32(TNode<HeapNumber> number) {
IsHeapNumberPositive(number),
[=] {
TNode<Float64T> value = LoadHeapNumberValue(number);
- TNode<Uint32T> int_value = Unsigned(TruncateFloat64ToWord32(value));
+ TNode<Uint32T> int_value = TruncateFloat64ToWord32(value);
return Float64Equal(value, ChangeUint32ToFloat64(int_value));
},
[=] { return Int32FalseConstant(); });
@@ -7423,8 +7462,8 @@ TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
return result.value();
}
-TNode<String> CodeStubAssembler::StringFromSingleCodePoint(
- TNode<Int32T> codepoint, UnicodeEncoding encoding) {
+TNode<String> CodeStubAssembler::StringFromSingleUTF16EncodedCodePoint(
+ TNode<Int32T> codepoint) {
VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
Label if_isword16(this), if_isword32(this), return_result(this);
@@ -7440,27 +7479,6 @@ TNode<String> CodeStubAssembler::StringFromSingleCodePoint(
BIND(&if_isword32);
{
- switch (encoding) {
- case UnicodeEncoding::UTF16:
- break;
- case UnicodeEncoding::UTF32: {
- // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
- Node* lead_offset = Int32Constant(0xD800 - (0x10000 >> 10));
-
- // lead = (codepoint >> 10) + LEAD_OFFSET
- Node* lead =
- Int32Add(Word32Shr(codepoint, Int32Constant(10)), lead_offset);
-
- // trail = (codepoint & 0x3FF) + 0xDC00;
- Node* trail = Int32Add(Word32And(codepoint, Int32Constant(0x3FF)),
- Int32Constant(0xDC00));
-
- // codpoint = (trail << 16) | lead;
- codepoint = Signed(Word32Or(Word32Shl(trail, Int32Constant(16)), lead));
- break;
- }
- }
-
Node* value = AllocateSeqTwoByteString(2);
StoreNoWriteBarrier(
MachineRepresentation::kWord32, value,
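For readers tracing the removed UTF32 branch above: with LEAD_OFFSET = 0xD800 - (0x10000 >> 10) = 0xD7C0, the deleted formula mapped a supplementary code point to its UTF-16 surrogate pair, which the renamed StringFromSingleUTF16EncodedCodePoint now expects callers to have done already. A standalone check of that arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t codepoint = 0x1F600;  // U+1F600, outside the BMP
  const uint32_t lead_offset = 0xD800 - (0x10000 >> 10);  // 0xD7C0
  const uint32_t lead = (codepoint >> 10) + lead_offset;  // 0xD83D
  const uint32_t trail = (codepoint & 0x3FF) + 0xDC00;    // 0xDE00
  assert(lead == 0xD83D && trail == 0xDE00);
  return 0;
}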
@@ -7513,7 +7531,7 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
// contains two elements (number and string) for each cache entry.
// TODO(ishell): cleanup mask handling.
Node* mask =
- BitcastTaggedToWord(LoadFixedArrayBaseLength(number_string_cache));
+ BitcastTaggedSignedToWord(LoadFixedArrayBaseLength(number_string_cache));
TNode<IntPtrT> one = IntPtrConstant(1);
mask = IntPtrSub(mask, one);
@@ -7560,8 +7578,8 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
BIND(&if_smi);
{
// Load the smi key, make sure it matches the smi we're looking for.
- Node* smi_index = BitcastWordToTagged(
- WordAnd(WordShl(BitcastTaggedToWord(smi_input.value()), one), mask));
+ Node* smi_index = BitcastWordToTagged(WordAnd(
+ WordShl(BitcastTaggedSignedToWord(smi_input.value()), one), mask));
Node* smi_key = UnsafeLoadFixedArrayElement(CAST(number_string_cache),
smi_index, 0, SMI_PARAMETERS);
GotoIf(WordNotEqual(smi_key, smi_input.value()), &runtime);
@@ -8333,40 +8351,41 @@ TNode<IntPtrT> CodeStubAssembler::EntryToIndex(TNode<IntPtrT> entry,
field_index));
}
-TNode<MaybeObject> CodeStubAssembler::LoadDescriptorArrayElement(
- TNode<DescriptorArray> object, Node* index, int additional_offset) {
- return LoadArrayElement(object, DescriptorArray::kHeaderSize, index,
- additional_offset);
+template <typename T>
+TNode<T> CodeStubAssembler::LoadDescriptorArrayElement(
+ TNode<DescriptorArray> object, TNode<IntPtrT> index,
+ int additional_offset) {
+ return LoadArrayElement<DescriptorArray, T>(
+ object, DescriptorArray::kHeaderSize, index, additional_offset);
}
TNode<Name> CodeStubAssembler::LoadKeyByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- return CAST(LoadDescriptorArrayElement(container, key_index, 0));
+ return CAST(LoadDescriptorArrayElement<HeapObject>(container, key_index, 0));
}
TNode<Uint32T> CodeStubAssembler::LoadDetailsByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- const int kKeyToDetails =
- DescriptorArray::ToDetailsIndex(0) - DescriptorArray::ToKeyIndex(0);
- return Unsigned(
- LoadAndUntagToWord32ArrayElement(container, DescriptorArray::kHeaderSize,
- key_index, kKeyToDetails * kTaggedSize));
+ const int kKeyToDetailsOffset =
+ DescriptorArray::kEntryDetailsOffset - DescriptorArray::kEntryKeyOffset;
+ return Unsigned(LoadAndUntagToWord32ArrayElement(
+ container, DescriptorArray::kHeaderSize, key_index, kKeyToDetailsOffset));
}
TNode<Object> CodeStubAssembler::LoadValueByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- const int kKeyToValue =
- DescriptorArray::ToValueIndex(0) - DescriptorArray::ToKeyIndex(0);
- return CAST(LoadDescriptorArrayElement(container, key_index,
- kKeyToValue * kTaggedSize));
+ const int kKeyToValueOffset =
+ DescriptorArray::kEntryValueOffset - DescriptorArray::kEntryKeyOffset;
+ return LoadDescriptorArrayElement<Object>(container, key_index,
+ kKeyToValueOffset);
}
TNode<MaybeObject> CodeStubAssembler::LoadFieldTypeByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- const int kKeyToValue =
- DescriptorArray::ToValueIndex(0) - DescriptorArray::ToKeyIndex(0);
- return LoadDescriptorArrayElement(container, key_index,
- kKeyToValue * kTaggedSize);
+ const int kKeyToValueOffset =
+ DescriptorArray::kEntryValueOffset - DescriptorArray::kEntryKeyOffset;
+ return LoadDescriptorArrayElement<MaybeObject>(container, key_index,
+ kKeyToValueOffset);
}
TNode<IntPtrT> CodeStubAssembler::DescriptorEntryToIndex(
@@ -8377,14 +8396,14 @@ TNode<IntPtrT> CodeStubAssembler::DescriptorEntryToIndex(
TNode<Name> CodeStubAssembler::LoadKeyByDescriptorEntry(
TNode<DescriptorArray> container, TNode<IntPtrT> descriptor_entry) {
- return CAST(LoadDescriptorArrayElement(
+ return CAST(LoadDescriptorArrayElement<HeapObject>(
container, DescriptorEntryToIndex(descriptor_entry),
DescriptorArray::ToKeyIndex(0) * kTaggedSize));
}
TNode<Name> CodeStubAssembler::LoadKeyByDescriptorEntry(
TNode<DescriptorArray> container, int descriptor_entry) {
- return CAST(LoadDescriptorArrayElement(
+ return CAST(LoadDescriptorArrayElement<HeapObject>(
container, IntPtrConstant(0),
DescriptorArray::ToKeyIndex(descriptor_entry) * kTaggedSize));
}
@@ -8406,14 +8425,14 @@ TNode<Uint32T> CodeStubAssembler::LoadDetailsByDescriptorEntry(
TNode<Object> CodeStubAssembler::LoadValueByDescriptorEntry(
TNode<DescriptorArray> container, int descriptor_entry) {
- return CAST(LoadDescriptorArrayElement(
+ return LoadDescriptorArrayElement<Object>(
container, IntPtrConstant(0),
- DescriptorArray::ToValueIndex(descriptor_entry) * kTaggedSize));
+ DescriptorArray::ToValueIndex(descriptor_entry) * kTaggedSize);
}
TNode<MaybeObject> CodeStubAssembler::LoadFieldTypeByDescriptorEntry(
TNode<DescriptorArray> container, TNode<IntPtrT> descriptor_entry) {
- return LoadDescriptorArrayElement(
+ return LoadDescriptorArrayElement<MaybeObject>(
container, DescriptorEntryToIndex(descriptor_entry),
DescriptorArray::ToValueIndex(0) * kTaggedSize);
}
@@ -9503,15 +9522,15 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
Node* accessor_info = value;
CSA_ASSERT(this, IsAccessorInfo(value));
CSA_ASSERT(this, TaggedIsNotSmi(receiver));
- Label if_array(this), if_function(this), if_value(this);
+ Label if_array(this), if_function(this), if_wrapper(this);
// Dispatch based on {receiver} instance type.
Node* receiver_map = LoadMap(receiver);
Node* receiver_instance_type = LoadMapInstanceType(receiver_map);
GotoIf(IsJSArrayInstanceType(receiver_instance_type), &if_array);
GotoIf(IsJSFunctionInstanceType(receiver_instance_type), &if_function);
- Branch(IsJSValueInstanceType(receiver_instance_type), &if_value,
- if_bailout);
+ Branch(IsJSPrimitiveWrapperInstanceType(receiver_instance_type),
+ &if_wrapper, if_bailout);
// JSArray AccessorInfo case.
BIND(&if_array);
@@ -9538,14 +9557,15 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
Goto(&done);
}
- // JSValue AccessorInfo case.
- BIND(&if_value);
+ // JSPrimitiveWrapper AccessorInfo case.
+ BIND(&if_wrapper);
{
- // We only deal with the "length" accessor on JSValue string wrappers.
+ // We only deal with the "length" accessor on JSPrimitiveWrapper string
+ // wrappers.
GotoIfNot(IsLengthString(
LoadObjectField(accessor_info, AccessorInfo::kNameOffset)),
if_bailout);
- Node* receiver_value = LoadJSValueValue(receiver);
+ Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver);
GotoIfNot(TaggedIsNotSmi(receiver_value), if_bailout);
GotoIfNot(IsString(receiver_value), if_bailout);
var_value.Bind(LoadStringLengthAsSmi(receiver_value));
@@ -9646,8 +9666,9 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
// clang-format off
int32_t values[] = {
// Handled by {if_isobjectorsmi}.
- PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS,
- HOLEY_ELEMENTS,
+ PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, HOLEY_ELEMENTS,
+ PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS,
+ HOLEY_FROZEN_ELEMENTS,
// Handled by {if_isdouble}.
PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS,
// Handled by {if_isdictionary}.
@@ -9673,7 +9694,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
};
Label* labels[] = {
&if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
- &if_isobjectorsmi,
+ &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
+ &if_isobjectorsmi, &if_isobjectorsmi,
&if_isdouble, &if_isdouble,
&if_isdictionary,
&if_isfaststringwrapper,
@@ -9731,8 +9753,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
BIND(&if_isfaststringwrapper);
{
- CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
- Node* string = LoadJSValueValue(object);
+ CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE));
+ Node* string = LoadJSPrimitiveWrapperValue(object);
CSA_ASSERT(this, IsString(string));
Node* length = LoadStringLengthAsWord(string);
GotoIf(UintPtrLessThan(intptr_index, length), if_found);
@@ -9740,8 +9762,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
BIND(&if_isslowstringwrapper);
{
- CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
- Node* string = LoadJSValueValue(object);
+ CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE));
+ Node* string = LoadJSPrimitiveWrapperValue(object);
CSA_ASSERT(this, IsString(string));
Node* length = LoadStringLengthAsWord(string);
GotoIf(UintPtrLessThan(intptr_index, length), if_found);
@@ -9749,7 +9771,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
BIND(&if_typedarray);
{
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(CAST(object));
GotoIf(IsDetachedBuffer(buffer), if_absent);
TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
@@ -9794,15 +9816,15 @@ void CodeStubAssembler::BranchIfMaybeSpecialIndex(TNode<String> name_string,
}
void CodeStubAssembler::TryPrototypeChainLookup(
- Node* receiver, Node* key, const LookupInHolder& lookup_property_in_holder,
+ Node* receiver, Node* object, Node* key,
+ const LookupInHolder& lookup_property_in_holder,
const LookupInHolder& lookup_element_in_holder, Label* if_end,
Label* if_bailout, Label* if_proxy) {
// Ensure receiver is JSReceiver, otherwise bailout.
- Label if_objectisnotsmi(this);
- Branch(TaggedIsSmi(receiver), if_bailout, &if_objectisnotsmi);
- BIND(&if_objectisnotsmi);
+ GotoIf(TaggedIsSmi(receiver), if_bailout);
+ CSA_ASSERT(this, TaggedIsNotSmi(object));
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(object);
Node* instance_type = LoadMapInstanceType(map);
{
Label if_objectisreceiver(this);
@@ -9812,9 +9834,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
if_bailout);
BIND(&if_objectisreceiver);
- if (if_proxy) {
- GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy);
- }
+ GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy);
}
VARIABLE(var_index, MachineType::PointerRepresentation());
@@ -9826,7 +9846,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
BIND(&if_iskeyunique);
{
- VARIABLE(var_holder, MachineRepresentation::kTagged, receiver);
+ VARIABLE(var_holder, MachineRepresentation::kTagged, object);
VARIABLE(var_holder_map, MachineRepresentation::kTagged, map);
VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32,
instance_type);
@@ -9872,7 +9892,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
}
BIND(&if_keyisindex);
{
- VARIABLE(var_holder, MachineRepresentation::kTagged, receiver);
+ VARIABLE(var_holder, MachineRepresentation::kTagged, object);
VARIABLE(var_holder_map, MachineRepresentation::kTagged, map);
VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32,
instance_type);
@@ -10049,7 +10069,7 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
Smi smi_index;
constant_index = ToSmiConstant(index_node, &smi_index);
if (constant_index) index = smi_index.value();
- index_node = BitcastTaggedToWord(index_node);
+ index_node = BitcastTaggedSignedToWord(index_node);
} else {
DCHECK(mode == INTPTR_PARAMETERS);
constant_index = ToIntPtrConstant(index_node, index);
@@ -10594,7 +10614,8 @@ void CodeStubAssembler::BigIntToRawBytes(TNode<BigInt> bigint,
void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode,
- Label* bailout, Node* context) {
+ Label* bailout, Node* context,
+ Variable* maybe_converted_value) {
CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
Node* elements = LoadElements(object);
@@ -10610,12 +10631,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
TNode<IntPtrT> intptr_key = TryToIntptr(key, bailout);
if (IsTypedArrayElementsKind(elements_kind)) {
- Label done(this);
+ Label done(this), update_value_and_bailout(this, Label::kDeferred);
// IntegerIndexedElementSet converts value to a Number/BigInt prior to the
// bounds check.
- value = PrepareValueForWriteToTypedArray(CAST(value), elements_kind,
- CAST(context));
+ Node* converted_value = PrepareValueForWriteToTypedArray(
+ CAST(value), elements_kind, CAST(context));
// There must be no allocations between the buffer load and
  // the actual store to the backing store, because GC may decide that
@@ -10623,8 +10644,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// TODO(ishell): introduce DisallowHeapAllocationCode scope here.
// Check if buffer has been detached.
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), bailout);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(CAST(object));
+ if (maybe_converted_value) {
+ GotoIf(IsDetachedBuffer(buffer), &update_value_and_bailout);
+ } else {
+ GotoIf(IsDetachedBuffer(buffer), bailout);
+ }
// Bounds check.
TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
@@ -10633,27 +10658,88 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// Skip the store if we write beyond the length or
// to a property with a negative integer index.
GotoIfNot(UintPtrLessThan(intptr_key, length), &done);
- } else if (store_mode == STANDARD_STORE) {
- GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
} else {
- // This case is produced due to the dispatched call in
- // ElementsTransitionAndStore and StoreFastElement.
- // TODO(jgruber): Avoid generating unsupported combinations to save code
- // size.
- DebugBreak();
+ DCHECK_EQ(store_mode, STANDARD_STORE);
+ GotoIfNot(UintPtrLessThan(intptr_key, length), &update_value_and_bailout);
}
TNode<RawPtrT> backing_store = LoadJSTypedArrayBackingStore(CAST(object));
- StoreElement(backing_store, elements_kind, intptr_key, value,
+ StoreElement(backing_store, elements_kind, intptr_key, converted_value,
parameter_mode);
Goto(&done);
+ BIND(&update_value_and_bailout);
+ // We already prepared the incoming value for storing into a typed array.
+ // This might involve calling ToNumber in some cases. We shouldn't call
+  // ToNumber again in the runtime, so pass the converted value to the runtime.
+  // The prepared value is an untagged value. Convert it to a tagged value
+  // to pass it to the runtime. It is not possible to do the detached buffer check
+ // before we prepare the value, since ToNumber can detach the ArrayBuffer.
+ // The spec specifies the order of these operations.
+ if (maybe_converted_value != nullptr) {
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ maybe_converted_value->Bind(SmiFromInt32(converted_value));
+ break;
+ case UINT32_ELEMENTS:
+ maybe_converted_value->Bind(ChangeUint32ToTagged(converted_value));
+ break;
+ case INT32_ELEMENTS:
+ maybe_converted_value->Bind(ChangeInt32ToTagged(converted_value));
+ break;
+ case FLOAT32_ELEMENTS: {
+ Label dont_allocate_heap_number(this), end(this);
+ GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number);
+ GotoIf(IsHeapNumber(value), &dont_allocate_heap_number);
+ {
+ maybe_converted_value->Bind(AllocateHeapNumberWithValue(
+ ChangeFloat32ToFloat64(converted_value)));
+ Goto(&end);
+ }
+ BIND(&dont_allocate_heap_number);
+ {
+ maybe_converted_value->Bind(value);
+ Goto(&end);
+ }
+ BIND(&end);
+ break;
+ }
+ case FLOAT64_ELEMENTS: {
+ Label dont_allocate_heap_number(this), end(this);
+ GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number);
+ GotoIf(IsHeapNumber(value), &dont_allocate_heap_number);
+ {
+ maybe_converted_value->Bind(
+ AllocateHeapNumberWithValue(converted_value));
+ Goto(&end);
+ }
+ BIND(&dont_allocate_heap_number);
+ {
+ maybe_converted_value->Bind(value);
+ Goto(&end);
+ }
+ BIND(&end);
+ break;
+ }
+ case BIGINT64_ELEMENTS:
+ case BIGUINT64_ELEMENTS:
+ maybe_converted_value->Bind(converted_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ Goto(bailout);
+
BIND(&done);
return;
}
- DCHECK(
- IsFastElementsKind(elements_kind) ||
- IsInRange(elements_kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS));
+ DCHECK(IsFastElementsKind(elements_kind) ||
+ IsSealedElementsKind(elements_kind));
Node* length = SelectImpl(
IsJSArray(object), [=]() { return LoadJSArrayLength(object); },
@@ -10670,18 +10756,24 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
value = TryTaggedToFloat64(value, bailout);
}
- if (IsGrowStoreMode(store_mode) &&
- !(IsInRange(elements_kind, PACKED_SEALED_ELEMENTS,
- HOLEY_SEALED_ELEMENTS))) {
+ if (IsGrowStoreMode(store_mode) && !IsSealedElementsKind(elements_kind)) {
elements = CheckForCapacityGrow(object, elements, elements_kind, length,
intptr_key, parameter_mode, bailout);
} else {
GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
}
+  // Cannot store to a hole in holey sealed elements, so bail out.
+ if (elements_kind == HOLEY_SEALED_ELEMENTS) {
+ TNode<Object> target_value =
+ LoadFixedArrayElement(CAST(elements), intptr_key);
+ GotoIf(IsTheHole(target_value), bailout);
+ }
+
// If we didn't grow {elements}, it might still be COW, in which case we
// copy it now.
- if (!IsSmiOrObjectElementsKind(elements_kind)) {
+ if (!(IsSmiOrObjectElementsKind(elements_kind) ||
+ IsSealedElementsKind(elements_kind))) {
CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
} else if (IsCOWHandlingStoreMode(store_mode)) {
elements = CopyElementsOnWrite(object, elements, elements_kind, length,
@@ -10925,7 +11017,8 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
// Link the object to the allocation site list
TNode<ExternalReference> site_list = ExternalConstant(
ExternalReference::allocation_sites_list_address(isolate()));
- TNode<Object> next_site = CAST(LoadBufferObject(site_list, 0));
+ TNode<Object> next_site =
+ LoadBufferObject(ReinterpretCast<RawPtrT>(site_list), 0);
// TODO(mvstanton): This is a store to a weak pointer, which we may want to
// mark as such in order to skip the write barrier, once we have a unified
@@ -12155,8 +12248,9 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
return result.value();
}
-Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
- Variable* var_type_feedback) {
+TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
+ SloppyTNode<Object> rhs,
+ Variable* var_type_feedback) {
// Pseudo-code for the algorithm below:
//
// if (lhs == rhs) {
@@ -12208,7 +12302,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
Label if_equal(this), if_notequal(this), if_not_equivalent_types(this),
end(this);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(Oddball, result);
OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kNone);
@@ -12235,7 +12329,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisnotsmi);
{
// Load the map of {lhs}.
- Node* lhs_map = LoadMap(lhs);
+ TNode<Map> lhs_map = LoadMap(CAST(lhs));
// Check if {lhs} is a HeapNumber.
Label if_lhsisnumber(this), if_lhsisnotnumber(this);
@@ -12250,8 +12344,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsissmi);
{
// Convert {lhs} and {rhs} to floating point values.
- Node* lhs_value = LoadHeapNumberValue(lhs);
- Node* rhs_value = SmiToFloat64(rhs);
+ Node* lhs_value = LoadHeapNumberValue(CAST(lhs));
+ Node* rhs_value = SmiToFloat64(CAST(rhs));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
@@ -12261,8 +12355,9 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisnotsmi);
{
+ TNode<HeapObject> rhs_ho = CAST(rhs);
// Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ TNode<Map> rhs_map = LoadMap(rhs_ho);
// Check if {rhs} is also a HeapNumber.
Label if_rhsisnumber(this), if_rhsisnotnumber(this);
@@ -12271,8 +12366,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisnumber);
{
// Convert {lhs} and {rhs} to floating point values.
- Node* lhs_value = LoadHeapNumberValue(lhs);
- Node* rhs_value = LoadHeapNumberValue(rhs);
+ Node* lhs_value = LoadHeapNumberValue(CAST(lhs));
+ Node* rhs_value = LoadHeapNumberValue(CAST(rhs));
CombineFeedback(var_type_feedback,
CompareOperationFeedback::kNumber);
@@ -12308,7 +12403,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisstring);
{
// Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadInstanceType(rhs);
+ Node* rhs_instance_type = LoadInstanceType(CAST(rhs));
// Check if {rhs} is also a String.
Label if_rhsisstring(this, Label::kDeferred),
@@ -12325,8 +12420,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
CollectFeedbackForString(rhs_instance_type);
var_type_feedback->Bind(SmiOr(lhs_feedback, rhs_feedback));
}
- result.Bind(CallBuiltin(Builtins::kStringEqual,
- NoContextConstant(), lhs, rhs));
+ result = CAST(CallBuiltin(Builtins::kStringEqual,
+ NoContextConstant(), lhs, rhs));
Goto(&end);
}
@@ -12344,7 +12439,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisbigint);
{
// Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadInstanceType(rhs);
+ TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs));
// Check if {rhs} is also a BigInt.
Label if_rhsisbigint(this, Label::kDeferred),
@@ -12356,8 +12451,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
{
CombineFeedback(var_type_feedback,
CompareOperationFeedback::kBigInt);
- result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
- NoContextConstant(), lhs, rhs));
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt,
+ NoContextConstant(), lhs, rhs));
Goto(&end);
}
@@ -12368,8 +12463,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisnotbigint);
if (var_type_feedback != nullptr) {
// Load the instance type of {rhs}.
- Node* rhs_map = LoadMap(rhs);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+ TNode<Map> rhs_map = LoadMap(CAST(rhs));
+ TNode<Uint16T> rhs_instance_type = LoadMapInstanceType(rhs_map);
Label if_lhsissymbol(this), if_lhsisreceiver(this),
if_lhsisoddball(this);
@@ -12442,7 +12537,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisnotsmi);
{
// Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ TNode<Map> rhs_map = LoadMap(CAST(rhs));
// The {rhs} could be a HeapNumber with the same value as {lhs}.
Label if_rhsisnumber(this), if_rhsisnotnumber(this);
@@ -12451,8 +12546,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisnumber);
{
// Convert {lhs} and {rhs} to floating point values.
- Node* lhs_value = SmiToFloat64(lhs);
- Node* rhs_value = LoadHeapNumberValue(rhs);
+ TNode<Float64T> lhs_value = SmiToFloat64(CAST(lhs));
+ TNode<Float64T> rhs_value = LoadHeapNumberValue(CAST(rhs));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
@@ -12468,7 +12563,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_equal);
{
- result.Bind(TrueConstant());
+ result = TrueConstant();
Goto(&end);
}
@@ -12480,7 +12575,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_notequal);
{
- result.Bind(FalseConstant());
+ result = FalseConstant();
Goto(&end);
}
@@ -12636,7 +12731,7 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context,
&return_true, &return_false, next_holder, if_bailout);
};
- TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ TryPrototypeChainLookup(object, object, key, lookup_property_in_holder,
lookup_element_in_holder, &return_false,
&call_runtime, &if_proxy);
@@ -13114,8 +13209,9 @@ TNode<JSArrayIterator> CodeStubAssembler::CreateArrayIterator(
return CAST(iterator);
}
-Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value,
- Node* done) {
+TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
+ SloppyTNode<Context> context, SloppyTNode<Object> value,
+ SloppyTNode<Oddball> done) {
CSA_ASSERT(this, IsBoolean(done));
Node* native_context = LoadNativeContext(context);
Node* map =
@@ -13128,7 +13224,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value,
RootIndex::kEmptyFixedArray);
StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value);
StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done);
- return result;
+ return CAST(result);
}
Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
@@ -13174,9 +13270,8 @@ TNode<JSReceiver> CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
return Construct(context, constructor, len);
}
-Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
- CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
- TNode<Uint32T> buffer_bit_field = LoadJSArrayBufferBitField(CAST(buffer));
+TNode<BoolT> CodeStubAssembler::IsDetachedBuffer(TNode<JSArrayBuffer> buffer) {
+ TNode<Uint32T> buffer_bit_field = LoadJSArrayBufferBitField(buffer);
return IsSetWord32<JSArrayBuffer::WasDetachedBit>(buffer_bit_field);
}
@@ -13367,7 +13462,8 @@ void CodeStubArguments::PopAndReturn(Node* value) {
value);
}
-Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) {
+TNode<BoolT> CodeStubAssembler::IsFastElementsKind(
+ TNode<Int32T> elements_kind) {
STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
return Uint32LessThanOrEqual(elements_kind,
Int32Constant(LAST_FAST_ELEMENTS_KIND));
@@ -13382,7 +13478,8 @@ TNode<BoolT> CodeStubAssembler::IsDoubleElementsKind(
Int32Constant(PACKED_DOUBLE_ELEMENTS / 2));
}
-Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) {
+TNode<BoolT> CodeStubAssembler::IsFastSmiOrTaggedElementsKind(
+ TNode<Int32T> elements_kind) {
STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS > TERMINAL_FAST_ELEMENTS_KIND);
STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS > TERMINAL_FAST_ELEMENTS_KIND);
@@ -13390,12 +13487,14 @@ Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) {
Int32Constant(TERMINAL_FAST_ELEMENTS_KIND));
}
-Node* CodeStubAssembler::IsFastSmiElementsKind(Node* elements_kind) {
+TNode<BoolT> CodeStubAssembler::IsFastSmiElementsKind(
+ SloppyTNode<Int32T> elements_kind) {
return Uint32LessThanOrEqual(elements_kind,
Int32Constant(HOLEY_SMI_ELEMENTS));
}
-Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
+TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKind(
+ TNode<Int32T> elements_kind) {
CSA_ASSERT(this, IsFastElementsKind(elements_kind));
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == (PACKED_SMI_ELEMENTS | 1));
@@ -13404,7 +13503,8 @@ Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
return IsSetWord32(elements_kind, 1);
}
-Node* CodeStubAssembler::IsHoleyFastElementsKindForRead(Node* elements_kind) {
+TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKindForRead(
+ TNode<Int32T> elements_kind) {
CSA_ASSERT(this,
Uint32LessThanOrEqual(elements_kind,
Int32Constant(LAST_FROZEN_ELEMENTS_KIND)));
@@ -13417,8 +13517,8 @@ Node* CodeStubAssembler::IsHoleyFastElementsKindForRead(Node* elements_kind) {
return IsSetWord32(elements_kind, 1);
}
-Node* CodeStubAssembler::IsElementsKindGreaterThan(
- Node* target_kind, ElementsKind reference_kind) {
+TNode<BoolT> CodeStubAssembler::IsElementsKindGreaterThan(
+ TNode<Int32T> target_kind, ElementsKind reference_kind) {
return Int32GreaterThan(target_kind, Int32Constant(reference_kind));
}
@@ -13442,14 +13542,6 @@ Node* CodeStubAssembler::IsDebugActive() {
return Word32NotEqual(is_debug_active, Int32Constant(0));
}
-TNode<BoolT> CodeStubAssembler::IsRuntimeCallStatsEnabled() {
- STATIC_ASSERT(sizeof(TracingFlags::runtime_stats) == kInt32Size);
- TNode<Word32T> flag_value = UncheckedCast<Word32T>(Load(
- MachineType::Int32(),
- ExternalConstant(ExternalReference::address_of_runtime_stats_flag())));
- return Word32NotEqual(flag_value, Int32Constant(0));
-}
-
Node* CodeStubAssembler::IsPromiseHookEnabled() {
Node* const promise_hook = Load(
MachineType::Pointer(),
@@ -13494,8 +13586,9 @@ TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
int index_shift = kSystemPointerSizeLog2 - kSmiShiftBits;
TNode<WordT> table_index =
- index_shift >= 0 ? WordShl(BitcastTaggedToWord(builtin_id), index_shift)
- : WordSar(BitcastTaggedToWord(builtin_id), -index_shift);
+ index_shift >= 0
+ ? WordShl(BitcastTaggedSignedToWord(builtin_id), index_shift)
+ : WordSar(BitcastTaggedSignedToWord(builtin_id), -index_shift);
return CAST(
Load(MachineType::TaggedPointer(),
@@ -13637,18 +13730,6 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
return fun;
}
-Node* CodeStubAssembler::MarkerIsFrameType(Node* marker_or_function,
- StackFrame::Type frame_type) {
- return WordEqual(marker_or_function,
- IntPtrConstant(StackFrame::TypeToMarker(frame_type)));
-}
-
-Node* CodeStubAssembler::MarkerIsNotFrameType(Node* marker_or_function,
- StackFrame::Type frame_type) {
- return WordNotEqual(marker_or_function,
- IntPtrConstant(StackFrame::TypeToMarker(frame_type)));
-}
-
void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
Node* receiver_map,
Label* if_fast,
@@ -13923,7 +14004,7 @@ void CodeStubAssembler::GotoIfInitialPrototypePropertiesModified(
if (i == 0) {
combined_details = details;
} else {
- combined_details = Unsigned(Word32And(combined_details, details));
+ combined_details = Word32And(combined_details, details);
}
}
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 207eb509e1..00a84c3926 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -10,9 +10,8 @@
#include "src/base/macros.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
+#include "src/common/message-template.h"
#include "src/compiler/code-assembler.h"
-#include "src/execution/frames.h"
-#include "src/execution/message-template.h"
#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
#include "src/objects/objects.h"
@@ -39,7 +38,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
PromiseSpeciesProtector) \
V(TypedArraySpeciesProtector, typed_array_species_protector, \
TypedArraySpeciesProtector) \
- V(RegExpSpeciesProtector, regexp_species_protector, RegExpSpeciesProtector)
#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
@@ -111,59 +109,45 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#endif
#ifdef DEBUG
-// Add stringified versions to the given values, except the first. That is,
-// transform
-// x, a, b, c, d, e, f
-// to
-// a, "a", b, "b", c, "c", d, "d", e, "e", f, "f"
-//
-// __VA_ARGS__ is ignored to allow the caller to pass through too many
-// parameters, and the first element is ignored to support having no extra
-// values without empty __VA_ARGS__ (which cause all sorts of problems with
-// extra commas).
-#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(_, v1, v2, v3, v4, v5, ...) \
- v1, #v1, v2, #v2, v3, #v3, v4, #v4, v5, #v5
-
-// Stringify the given variable number of arguments. The arguments are trimmed
-// to 5 if there are too many, and padded with nullptr if there are not enough.
-#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES(...) \
- CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \
- nullptr, nullptr)
-
-#define CSA_ASSERT_GET_FIRST(x, ...) (x)
-#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x
+// CSA_ASSERT_ARGS generates an
+// std::initializer_list<CodeStubAssembler::ExtraNode> from __VA_ARGS__. It
+// currently supports between 0 and 2 arguments.
+
+// clang-format off
+#define CSA_ASSERT_0_ARGS(...) {}
+#define CSA_ASSERT_1_ARG(a, ...) {{a, #a}}
+#define CSA_ASSERT_2_ARGS(a, b, ...) {{a, #a}, {b, #b}}
+// clang-format on
+#define SWITCH_CSA_ASSERT_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b)
+#define CSA_ASSERT_ARGS(...) \
+ SWITCH_CSA_ASSERT_ARGS(dummy, ##__VA_ARGS__, CSA_ASSERT_2_ARGS, \
+ CSA_ASSERT_1_ARG, CSA_ASSERT_0_ARGS)
// CSA_ASSERT(csa, <condition>, <extra values to print...>)
-// We have to jump through some hoops to allow <extra values to print...> to be
-// empty.
-#define CSA_ASSERT(csa, ...) \
- (csa)->Assert( \
- [&]() -> compiler::Node* { \
- return implicit_cast<compiler::SloppyTNode<Word32T>>( \
- EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__))); \
- }, \
- EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \
- CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
+#define CSA_ASSERT(csa, condition_node, ...) \
+ (csa)->Assert( \
+ [&]() -> compiler::Node* { \
+ return implicit_cast<compiler::SloppyTNode<Word32T>>(condition_node); \
+ }, \
+ #condition_node, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
// <extra values to print...>)
-#define CSA_ASSERT_BRANCH(csa, ...) \
- (csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)), \
- EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \
- __LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
-
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
- [&]() -> compiler::Node* { \
- compiler::Node* const argc = \
- (csa)->Parameter(Descriptor::kJSActualArgumentsCount); \
- return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
- }, \
- "argc " #op " " #expected, __FILE__, __LINE__, \
- SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \
- "argc")
+#define CSA_ASSERT_BRANCH(csa, gen, ...) \
+ (csa)->Assert(gen, #gen, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
+
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Assert( \
+ [&]() -> compiler::Node* { \
+ compiler::Node* const argc = \
+ (csa)->Parameter(Descriptor::kJSActualArgumentsCount); \
+ return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__, \
+ {{SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \
+ "argc"}})
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
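
The rewritten CSA_ASSERT_ARGS macros above use the usual variadic-macro counting trick: candidate macro names are appended after __VA_ARGS__, so whichever name lands in the FUNC slot matches the number of extra arguments supplied. A minimal standalone sketch of the same idea (plain C++, illustrative names only, not the V8 macros; ##__VA_ARGS__ is the same GNU-style extension the V8 macros rely on):

#include <cstdio>

#define EXTRA_0_ARGS(...) std::printf("no extras\n")
#define EXTRA_1_ARG(a, ...) std::printf("one extra: %d\n", (a))
#define EXTRA_2_ARGS(a, b, ...) std::printf("two extras: %d %d\n", (a), (b))
// The trailing candidates shift left as more arguments are passed, so FUNC
// resolves to the macro matching the argument count (0, 1, or 2).
#define PICK_EXTRA(dummy, a, b, FUNC, ...) FUNC(a, b)
#define EXTRA_ARGS(...) \
  PICK_EXTRA(dummy, ##__VA_ARGS__, EXTRA_2_ARGS, EXTRA_1_ARG, EXTRA_0_ARGS)

int main() {
  EXTRA_ARGS();      // selects EXTRA_0_ARGS
  EXTRA_ARGS(1);     // selects EXTRA_1_ARG
  EXTRA_ARGS(1, 2);  // selects EXTRA_2_ARGS
  return 0;
}
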
@@ -490,21 +474,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);
// Smi operations.
-#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
- TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
- if (SmiValuesAre32Bits()) { \
- return BitcastWordToTaggedSigned( \
- IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \
- } else { \
- DCHECK(SmiValuesAre31Bits()); \
- if (kSystemPointerSize == kInt64Size) { \
- CSA_ASSERT(this, IsValidSmi(a)); \
- CSA_ASSERT(this, IsValidSmi(b)); \
- } \
- return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \
- Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \
- TruncateIntPtrToInt32(BitcastTaggedToWord(b))))); \
- } \
+#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
+ TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
+ if (SmiValuesAre32Bits()) { \
+ return BitcastWordToTaggedSigned(IntPtrOpName( \
+ BitcastTaggedSignedToWord(a), BitcastTaggedSignedToWord(b))); \
+ } else { \
+ DCHECK(SmiValuesAre31Bits()); \
+ if (kSystemPointerSize == kInt64Size) { \
+ CSA_ASSERT(this, IsValidSmi(a)); \
+ CSA_ASSERT(this, IsValidSmi(b)); \
+ } \
+ return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \
+ Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))))); \
+ } \
}
SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add)
SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub)
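
The SMI_ARITHMETIC_BINOP fast path above works because Smis are stored as shifted word values with a zero tag, so word-level arithmetic on the tagged representation directly yields the tagged result. A standalone sketch of that invariant (plain C++, assuming the 32-bit-Smi layout with the payload in the upper half of a 64-bit word; not V8 code):

#include <cstdint>
#include <cstdio>

constexpr int kSmiShift = 32;  // assumed shift for the 32-bit Smi layout

// Tag: place the payload in the upper 32 bits; the low bits stay zero.
int64_t ToSmi(int32_t value) { return static_cast<int64_t>(value) << kSmiShift; }
int32_t FromSmi(int64_t smi) { return static_cast<int32_t>(smi >> kSmiShift); }

// Adding the raw words of two Smis produces the Smi of the sum, which is the
// bitcast-add-bitcast pattern the macro generates when SmiValuesAre32Bits().
int64_t SmiAdd(int64_t a, int64_t b) { return a + b; }

int main() {
  std::printf("%d\n", FromSmi(SmiAdd(ToSmi(20), ToSmi(22))));  // prints 42
  return 0;
}
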
@@ -523,19 +507,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> TrySmiSub(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
TNode<Smi> SmiShl(TNode<Smi> a, int shift) {
- return BitcastWordToTaggedSigned(WordShl(BitcastTaggedToWord(a), shift));
+ return BitcastWordToTaggedSigned(
+ WordShl(BitcastTaggedSignedToWord(a), shift));
}
TNode<Smi> SmiShr(TNode<Smi> a, int shift) {
return BitcastWordToTaggedSigned(
- WordAnd(WordShr(BitcastTaggedToWord(a), shift),
- BitcastTaggedToWord(SmiConstant(-1))));
+ WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift),
+ BitcastTaggedSignedToWord(SmiConstant(-1))));
}
TNode<Smi> SmiSar(TNode<Smi> a, int shift) {
return BitcastWordToTaggedSigned(
- WordAnd(WordSar(BitcastTaggedToWord(a), shift),
- BitcastTaggedToWord(SmiConstant(-1))));
+ WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift),
+ BitcastTaggedSignedToWord(SmiConstant(-1))));
}
Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
@@ -556,19 +541,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
}
-#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
- TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
- if (SmiValuesAre32Bits()) { \
- return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
- } else { \
- DCHECK(SmiValuesAre31Bits()); \
- if (kSystemPointerSize == kInt64Size) { \
- CSA_ASSERT(this, IsValidSmi(a)); \
- CSA_ASSERT(this, IsValidSmi(b)); \
- } \
- return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \
- TruncateIntPtrToInt32(BitcastTaggedToWord(b))); \
- } \
+#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
+ TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
+ if (SmiValuesAre32Bits()) { \
+ return IntPtrOpName(BitcastTaggedSignedToWord(a), \
+ BitcastTaggedSignedToWord(b)); \
+ } else { \
+ DCHECK(SmiValuesAre31Bits()); \
+ if (kSystemPointerSize == kInt64Size) { \
+ CSA_ASSERT(this, IsValidSmi(a)); \
+ CSA_ASSERT(this, IsValidSmi(b)); \
+ } \
+ return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))); \
+ } \
}
SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal)
SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual)
@@ -626,43 +612,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
using BranchGenerator = std::function<void(Label*, Label*)>;
using NodeGenerator = std::function<Node*()>;
-
- void Assert(const BranchGenerator& branch, const char* message = nullptr,
- const char* file = nullptr, int line = 0,
- Node* extra_node1 = nullptr, const char* extra_node1_name = "",
- Node* extra_node2 = nullptr, const char* extra_node2_name = "",
- Node* extra_node3 = nullptr, const char* extra_node3_name = "",
- Node* extra_node4 = nullptr, const char* extra_node4_name = "",
- Node* extra_node5 = nullptr, const char* extra_node5_name = "");
- void Assert(const NodeGenerator& condition_body,
- const char* message = nullptr, const char* file = nullptr,
- int line = 0, Node* extra_node1 = nullptr,
- const char* extra_node1_name = "", Node* extra_node2 = nullptr,
- const char* extra_node2_name = "", Node* extra_node3 = nullptr,
- const char* extra_node3_name = "", Node* extra_node4 = nullptr,
- const char* extra_node4_name = "", Node* extra_node5 = nullptr,
- const char* extra_node5_name = "");
- void Check(const BranchGenerator& branch, const char* message = nullptr,
- const char* file = nullptr, int line = 0,
- Node* extra_node1 = nullptr, const char* extra_node1_name = "",
- Node* extra_node2 = nullptr, const char* extra_node2_name = "",
- Node* extra_node3 = nullptr, const char* extra_node3_name = "",
- Node* extra_node4 = nullptr, const char* extra_node4_name = "",
- Node* extra_node5 = nullptr, const char* extra_node5_name = "");
- void Check(const NodeGenerator& condition_body, const char* message = nullptr,
- const char* file = nullptr, int line = 0,
- Node* extra_node1 = nullptr, const char* extra_node1_name = "",
- Node* extra_node2 = nullptr, const char* extra_node2_name = "",
- Node* extra_node3 = nullptr, const char* extra_node3_name = "",
- Node* extra_node4 = nullptr, const char* extra_node4_name = "",
- Node* extra_node5 = nullptr, const char* extra_node5_name = "");
- void FailAssert(
- const char* message = nullptr, const char* file = nullptr, int line = 0,
- Node* extra_node1 = nullptr, const char* extra_node1_name = "",
- Node* extra_node2 = nullptr, const char* extra_node2_name = "",
- Node* extra_node3 = nullptr, const char* extra_node3_name = "",
- Node* extra_node4 = nullptr, const char* extra_node4_name = "",
- Node* extra_node5 = nullptr, const char* extra_node5_name = "");
+ using ExtraNode = std::pair<Node*, const char*>;
+
+ void Assert(const BranchGenerator& branch, const char* message,
+ const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes = {});
+ void Assert(const NodeGenerator& condition_body, const char* message,
+ const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes = {});
+ void Check(const BranchGenerator& branch, const char* message,
+ const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes = {});
+ void Check(const NodeGenerator& condition_body, const char* message,
+ const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes = {});
+ void FailAssert(const char* message, const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes = {});
void FastCheck(TNode<BoolT> condition);
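
The Assert/Check/FailAssert overloads above replace ten defaulted (node, name) parameters with a single std::initializer_list<ExtraNode>, so call sites brace-list only the extra values they actually want printed. A minimal standalone sketch of that calling pattern (plain C++ with an int standing in for Node*; not the V8 API):

#include <cstdio>
#include <initializer_list>
#include <utility>

using ExtraValue = std::pair<int, const char*>;  // stand-in for ExtraNode

void FailCheck(const char* message, const char* file, int line,
               std::initializer_list<ExtraValue> extras = {}) {
  std::printf("Check failed: %s (%s:%d)\n", message, file, line);
  for (const auto& extra : extras) {
    std::printf("  %s: %d\n", extra.second, extra.first);
  }
}

int main() {
  FailCheck("argc == 2", __FILE__, __LINE__);                 // no extras
  FailCheck("argc == 2", __FILE__, __LINE__, {{3, "argc"}});  // one labeled extra
  return 0;
}
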
@@ -794,6 +759,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// otherwise goes to {if_false}.
void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false);
+ // Branches to {if_false} if ToBoolean applied to {value} yields false,
+ // otherwise goes to {if_true}.
+ void BranchIfToBooleanIsFalse(Node* value, Label* if_false, Label* if_true) {
+ BranchIfToBooleanIsTrue(value, if_true, if_false);
+ }
+
void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
// Branches to {if_true} when --force-slow-path flag has been passed.
@@ -811,8 +782,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
MachineType type = MachineType::AnyTagged());
// Load an object pointer from a buffer that isn't in the heap.
- Node* LoadBufferObject(Node* buffer, int offset,
- MachineType type = MachineType::AnyTagged());
+ Node* LoadBufferObject(Node* buffer, int offset, MachineType type);
+ TNode<Object> LoadBufferObject(TNode<RawPtrT> buffer, int offset) {
+ return CAST(LoadBufferObject(buffer, offset, MachineType::AnyTagged()));
+ }
TNode<RawPtrT> LoadBufferPointer(TNode<RawPtrT> buffer, int offset) {
return UncheckedCast<RawPtrT>(
LoadBufferObject(buffer, offset, MachineType::Pointer()));
@@ -887,15 +860,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_convertible<TNode<T>, TNode<Object>>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
- return CAST(LoadFromObject(MachineTypeOf<T>::value, reference.object,
- reference.offset));
+ TNode<IntPtrT> offset =
+ IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
+ return CAST(
+ LoadFromObject(MachineTypeOf<T>::value, reference.object, offset));
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
- return UncheckedCast<T>(LoadFromObject(MachineTypeOf<T>::value,
- reference.object, reference.offset));
+ TNode<IntPtrT> offset =
+ IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
+ return UncheckedCast<T>(
+ LoadFromObject(MachineTypeOf<T>::value, reference.object, offset));
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<Object>>::value,
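
The LoadReference/StoreReference changes above fold the heap-object tag removal into the offset before the raw memory access. A standalone sketch of why that adjustment is needed (plain C++, with an assumed low-bit tag of 1; not V8 code):

#include <cstdint>
#include <cstdio>

constexpr intptr_t kTag = 1;  // assumed low-bit tag on object pointers

// A raw field load through a tagged pointer must remove the tag; subtracting
// it from the field offset once is equivalent to untagging the base pointer.
int32_t LoadInt32Field(uintptr_t tagged_object, intptr_t field_offset) {
  return *reinterpret_cast<const int32_t*>(tagged_object + field_offset - kTag);
}

int main() {
  struct { int32_t first; int32_t second; } object = {7, 42};
  uintptr_t tagged = reinterpret_cast<uintptr_t>(&object) + kTag;
  std::printf("%d\n", LoadInt32Field(tagged, sizeof(int32_t)));  // prints 42
  return 0;
}
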
@@ -908,15 +885,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
} else if (std::is_same<T, Map>::value) {
write_barrier = StoreToObjectWriteBarrier::kMap;
}
- StoreToObject(rep, reference.object, reference.offset, value,
- write_barrier);
+ TNode<IntPtrT> offset =
+ IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
+ StoreToObject(rep, reference.object, offset, value, write_barrier);
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
void StoreReference(Reference reference, TNode<T> value) {
- StoreToObject(MachineRepresentationOf<T>::value, reference.object,
- reference.offset, value, StoreToObjectWriteBarrier::kNone);
+ TNode<IntPtrT> offset =
+ IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
+ StoreToObject(MachineRepresentationOf<T>::value, reference.object, offset,
+ value, StoreToObjectWriteBarrier::kNone);
}
// Tag a smi and store it.
@@ -927,7 +907,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  // Load the Map of a HeapObject.
  TNode<Map> LoadMap(SloppyTNode<HeapObject> object);
  // Load the instance type of a HeapObject.
- TNode<Int32T> LoadInstanceType(SloppyTNode<HeapObject> object);
+ TNode<Uint16T> LoadInstanceType(SloppyTNode<HeapObject> object);
  // Compare the instance type of the object against the provided one.
TNode<BoolT> HasInstanceType(SloppyTNode<HeapObject> object,
InstanceType type);
@@ -967,7 +947,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Load bit field 3 of a map.
TNode<Uint32T> LoadMapBitField3(SloppyTNode<Map> map);
// Load the instance type of a map.
- TNode<Int32T> LoadMapInstanceType(SloppyTNode<Map> map);
+ TNode<Uint16T> LoadMapInstanceType(SloppyTNode<Map> map);
// Load the ElementsKind of a map.
TNode<Int32T> LoadMapElementsKind(SloppyTNode<Map> map);
TNode<Int32T> LoadElementsKind(SloppyTNode<HeapObject> object);
@@ -1023,8 +1003,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Uint32T> LoadStringLengthAsWord32(SloppyTNode<String> string);
// Loads a pointer to the sequential String char array.
Node* PointerToSeqStringData(Node* seq_string);
- // Load value field of a JSValue object.
- Node* LoadJSValueValue(Node* object);
+ // Load value field of a JSPrimitiveWrapper object.
+ Node* LoadJSPrimitiveWrapperValue(Node* object);
// Figures out whether the value of maybe_object is:
// - a SMI (jump to "if_smi", "extracted" will be the SMI value)
@@ -1076,8 +1056,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Array is any array-like type that has a fixed header followed by
// tagged elements.
- template <typename Array>
- TNode<MaybeObject> LoadArrayElement(
+ template <typename Array, typename T = MaybeObject>
+ TNode<T> LoadArrayElement(
TNode<Array> array, int array_header_size, Node* index,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
@@ -1232,15 +1212,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> LoadDoubleWithHoleCheck(
SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
MachineType machine_type = MachineType::Float64());
- Node* LoadFixedTypedArrayElementAsTagged(
- Node* data_pointer, Node* index_node, ElementsKind elements_kind,
+ TNode<Numeric> LoadFixedTypedArrayElementAsTagged(
+ TNode<RawPtrT> data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
TNode<Numeric> LoadFixedTypedArrayElementAsTagged(
- TNode<WordT> data_pointer, TNode<Smi> index, TNode<Int32T> elements_kind);
+ TNode<RawPtrT> data_pointer, TNode<Smi> index_node,
+ ElementsKind elements_kind) {
+ return LoadFixedTypedArrayElementAsTagged(data_pointer, index_node,
+ elements_kind, SMI_PARAMETERS);
+ }
+ TNode<Numeric> LoadFixedTypedArrayElementAsTagged(
+ TNode<RawPtrT> data_pointer, TNode<Smi> index,
+ TNode<Int32T> elements_kind);
// Parts of the above, factored out for readability:
- Node* LoadFixedBigInt64ArrayElementAsTagged(Node* data_pointer, Node* offset);
- Node* LoadFixedBigUint64ArrayElementAsTagged(Node* data_pointer,
- Node* offset);
+ TNode<BigInt> LoadFixedBigInt64ArrayElementAsTagged(
+ SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset);
+ TNode<BigInt> LoadFixedBigUint64ArrayElementAsTagged(
+ SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset);
// 64-bit platforms only:
TNode<BigInt> BigIntFromInt64(TNode<IntPtrT> value);
TNode<BigInt> BigIntFromUint64(TNode<UintPtrT> value);
@@ -1250,10 +1238,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void StoreJSTypedArrayElementFromTagged(TNode<Context> context,
TNode<JSTypedArray> typed_array,
- TNode<Object> index_node,
+ TNode<Smi> index_node,
TNode<Object> value,
- ElementsKind elements_kind,
- ParameterMode parameter_mode);
+ ElementsKind elements_kind);
// Context manipulation
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
@@ -1534,10 +1521,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Like above, but allowing custom bitfield initialization.
TNode<BigInt> AllocateRawBigInt(TNode<IntPtrT> length);
void StoreBigIntBitfield(TNode<BigInt> bigint, TNode<Word32T> bitfield);
- void StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
+ void StoreBigIntDigit(TNode<BigInt> bigint, intptr_t digit_index,
+ TNode<UintPtrT> digit);
+ void StoreBigIntDigit(TNode<BigInt> bigint, TNode<IntPtrT> digit_index,
TNode<UintPtrT> digit);
+
TNode<Word32T> LoadBigIntBitfield(TNode<BigInt> bigint);
- TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
+ TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, intptr_t digit_index);
+ TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint,
+ TNode<IntPtrT> digit_index);
// Allocate a ByteArray with the given length.
TNode<ByteArray> AllocateByteArray(TNode<UintPtrT> length,
@@ -1573,9 +1565,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
TNode<NameDictionary> AllocateNameDictionary(
- TNode<IntPtrT> at_least_space_for);
+ TNode<IntPtrT> at_least_space_for, AllocationFlags = kNone);
TNode<NameDictionary> AllocateNameDictionaryWithCapacity(
- TNode<IntPtrT> capacity);
+ TNode<IntPtrT> capacity, AllocationFlags = kNone);
TNode<NameDictionary> CopyNameDictionary(TNode<NameDictionary> dictionary,
Label* large_object_fallback);
@@ -1604,9 +1596,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void InitializeStructBody(Node* object, Node* map, Node* size,
int start_offset = Struct::kHeaderSize);
- Node* AllocateJSObjectFromMap(
- Node* map, Node* properties = nullptr, Node* elements = nullptr,
- AllocationFlags flags = kNone,
+ TNode<JSObject> AllocateJSObjectFromMap(
+ SloppyTNode<Map> map, SloppyTNode<HeapObject> properties = nullptr,
+ SloppyTNode<FixedArray> elements = nullptr, AllocationFlags flags = kNone,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectFromMap(
@@ -1696,6 +1688,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
fixed_array_map);
}
+ TNode<Map> GetStructMap(InstanceType instance_type);
+
TNode<FixedArray> AllocateUninitializedFixedArray(intptr_t capacity) {
return UncheckedCast<FixedArray>(AllocateFixedArray(
PACKED_ELEMENTS, IntPtrConstant(capacity), AllocationFlag::kNone));
@@ -1745,7 +1739,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> object,
IterationKind mode);
- Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done);
+ TNode<JSObject> AllocateJSIteratorResult(SloppyTNode<Context> context,
+ SloppyTNode<Object> value,
+ SloppyTNode<Oddball> done);
Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);
TNode<JSReceiver> ArraySpeciesCreate(TNode<Context> context,
@@ -1934,6 +1930,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SMI_PARAMETERS);
}
+ TNode<FixedArray> ExtractFixedArray(
+ TNode<FixedArray> source, TNode<IntPtrT> first, TNode<IntPtrT> count,
+ TNode<IntPtrT> capacity,
+ ExtractFixedArrayFlags extract_flags =
+ ExtractFixedArrayFlag::kAllFixedArrays) {
+ return CAST(ExtractFixedArray(source, first, count, capacity, extract_flags,
+ INTPTR_PARAMETERS));
+ }
+
// Copy a portion of an existing FixedArray or FixedDoubleArray into a new
// FixedArray, including special appropriate handling for COW arrays.
// * |source| is either a FixedArray or FixedDoubleArray from which to copy
@@ -2043,6 +2048,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> CalculateNewElementsCapacity(TNode<Smi> old_capacity) {
return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS));
}
+ TNode<IntPtrT> CalculateNewElementsCapacity(TNode<IntPtrT> old_capacity) {
+ return UncheckedCast<IntPtrT>(
+ CalculateNewElementsCapacity(old_capacity, INTPTR_PARAMETERS));
+ }
// Tries to grow the |elements| array of given |object| to store the |key|
// or bails out if the growing gap is too big. Returns new elements.
@@ -2086,19 +2095,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* if_bigint, Variable* var_bigint, Variable* var_feedback);
// Truncate the floating point value of a HeapNumber to an Int32.
- Node* TruncateHeapNumberValueToWord32(Node* object);
+ TNode<Int32T> TruncateHeapNumberValueToWord32(TNode<HeapNumber> object);
// Conversions.
- void TryHeapNumberToSmi(TNode<HeapNumber> number, TVariable<Smi>& output,
+ void TryHeapNumberToSmi(TNode<HeapNumber> number,
+ TVariable<Smi>& output, // NOLINT(runtime/references)
Label* if_smi);
- void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>& output,
+ void TryFloat64ToSmi(TNode<Float64T> number,
+ TVariable<Smi>& output, // NOLINT(runtime/references)
Label* if_smi);
TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
TNode<Number> ChangeUintPtrToTagged(TNode<UintPtrT> value);
TNode<Uint32T> ChangeNumberToUint32(TNode<Number> value);
- TNode<Float64T> ChangeNumberToFloat64(SloppyTNode<Number> value);
+ TNode<Float64T> ChangeNumberToFloat64(TNode<Number> value);
TNode<UintPtrT> TryNumberToUintPtr(TNode<Number> value, Label* if_negative);
TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(TNode<Number> value) {
return TryNumberToUintPtr(value, nullptr);
@@ -2145,10 +2156,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Throws a TypeError for {method_name} if {value} is neither of the given
- // {primitive_type} nor a JSValue wrapping a value of {primitive_type}, or
- // returns the {value} (or wrapped value) otherwise.
- Node* ToThisValue(Node* context, Node* value, PrimitiveType primitive_type,
- char const* method_name);
+ // {primitive_type} nor a JSPrimitiveWrapper wrapping a value of
+ // {primitive_type}, or returns the {value} (or wrapped value) otherwise.
+ TNode<Object> ToThisValue(TNode<Context> context, TNode<Object> value,
+ PrimitiveType primitive_type,
+ char const* method_name);
// Throws a TypeError for {method_name} if {value} is not of the given
// instance type. Returns {value}'s map.
@@ -2231,6 +2243,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSGeneratorObject(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSGlobalProxyInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSGlobalProxyMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSGlobalProxy(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSObjectInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSObjectMap(SloppyTNode<Map> map);
@@ -2246,9 +2259,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSTypedArrayInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSTypedArrayMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSTypedArray(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSValueInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSValueMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSValue(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSPrimitiveWrapperInstanceType(
+ SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSPrimitiveWrapperMap(SloppyTNode<Map> map);
+ TNode<BoolT> IsJSPrimitiveWrapper(SloppyTNode<HeapObject> object);
TNode<BoolT> IsMap(SloppyTNode<HeapObject> object);
TNode<BoolT> IsMutableHeapNumber(SloppyTNode<HeapObject> object);
TNode<BoolT> IsName(SloppyTNode<HeapObject> object);
@@ -2260,6 +2274,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsOneByteStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsPrimitiveInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsPrivateSymbol(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsPrivateName(SloppyTNode<Symbol> symbol);
TNode<BoolT> IsPromiseCapability(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPropertyArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPropertyCell(SloppyTNode<HeapObject> object);
@@ -2305,7 +2320,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsPromiseThenProtectorCellInvalid();
TNode<BoolT> IsArraySpeciesProtectorCellInvalid();
TNode<BoolT> IsTypedArraySpeciesProtectorCellInvalid();
- TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid();
+ TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid(
+ TNode<Context> native_context);
TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
TNode<BoolT> IsMockArrayBufferAllocatorFlag() {
@@ -2355,7 +2371,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Word32Equal(a, b);
}
bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; }
- Node* IsFastElementsKind(Node* elements_kind);
+ TNode<BoolT> IsFastElementsKind(TNode<Int32T> elements_kind);
bool IsFastElementsKind(ElementsKind kind) {
return v8::internal::IsFastElementsKind(kind);
}
@@ -2366,12 +2382,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool IsDoubleElementsKind(ElementsKind kind) {
return v8::internal::IsDoubleElementsKind(kind);
}
- Node* IsFastSmiOrTaggedElementsKind(Node* elements_kind);
- Node* IsFastSmiElementsKind(Node* elements_kind);
- Node* IsHoleyFastElementsKind(Node* elements_kind);
- Node* IsHoleyFastElementsKindForRead(Node* elements_kind);
- Node* IsElementsKindGreaterThan(Node* target_kind,
- ElementsKind reference_kind);
+ TNode<BoolT> IsFastSmiOrTaggedElementsKind(TNode<Int32T> elements_kind);
+ TNode<BoolT> IsFastSmiElementsKind(SloppyTNode<Int32T> elements_kind);
+ TNode<BoolT> IsHoleyFastElementsKind(TNode<Int32T> elements_kind);
+ TNode<BoolT> IsHoleyFastElementsKindForRead(TNode<Int32T> elements_kind);
+ TNode<BoolT> IsElementsKindGreaterThan(TNode<Int32T> target_kind,
+ ElementsKind reference_kind);
TNode<BoolT> IsElementsKindLessThanOrEqual(TNode<Int32T> target_kind,
ElementsKind reference_kind);
// Check if reference_kind_a <= target_kind <= reference_kind_b
@@ -2413,8 +2429,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* DerefIndirectString(TNode<String> string, TNode<Int32T> instance_type,
Label* cannot_deref);
- TNode<String> StringFromSingleCodePoint(TNode<Int32T> codepoint,
- UnicodeEncoding encoding);
+ TNode<String> StringFromSingleUTF16EncodedCodePoint(TNode<Int32T> codepoint);
// Type conversion helpers.
enum class BigIntHandling { kConvertToNumber, kThrow };
@@ -2578,7 +2593,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsSetSmi(SloppyTNode<Smi> smi, int untagged_mask) {
intptr_t mask_word = bit_cast<intptr_t>(Smi::FromInt(untagged_mask));
return WordNotEqual(
- WordAnd(BitcastTaggedToWord(smi), IntPtrConstant(mask_word)),
+ WordAnd(BitcastTaggedSignedToWord(smi), IntPtrConstant(mask_word)),
IntPtrConstant(0));
}
@@ -2950,11 +2965,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
  // If it can't handle the {receiver}/{key} case then the control goes
// to {if_bailout}.
// If {if_proxy} is nullptr, proxies go to if_bailout.
- void TryPrototypeChainLookup(Node* receiver, Node* key,
+ void TryPrototypeChainLookup(Node* receiver, Node* object, Node* key,
const LookupInHolder& lookup_property_in_holder,
const LookupInHolder& lookup_element_in_holder,
Label* if_end, Label* if_bailout,
- Label* if_proxy = nullptr);
+ Label* if_proxy);
// Instanceof helpers.
  // Returns true if {object} has {prototype} somewhere in its prototype
@@ -3055,7 +3070,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void EmitElementStore(Node* object, Node* key, Node* value,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode, Label* bailout,
- Node* context);
+ Node* context,
+ Variable* maybe_converted_value = nullptr);
Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind,
Node* length, Node* key, ParameterMode mode,
@@ -3204,8 +3220,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* Equal(Node* lhs, Node* rhs, Node* context,
Variable* var_type_feedback = nullptr);
- Node* StrictEqual(Node* lhs, Node* rhs,
- Variable* var_type_feedback = nullptr);
+ TNode<Oddball> StrictEqual(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
+ Variable* var_type_feedback = nullptr);
// ECMA#sec-samevalue
// Similar to StrictEqual except that NaNs are treated as equal and minus zero
@@ -3248,13 +3264,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Debug helpers
Node* IsDebugActive();
- TNode<BoolT> IsRuntimeCallStatsEnabled();
-
// JSArrayBuffer helpers
TNode<Uint32T> LoadJSArrayBufferBitField(TNode<JSArrayBuffer> array_buffer);
TNode<RawPtrT> LoadJSArrayBufferBackingStore(
TNode<JSArrayBuffer> array_buffer);
- Node* IsDetachedBuffer(Node* buffer);
+ TNode<BoolT> IsDetachedBuffer(TNode<JSArrayBuffer> buffer);
void ThrowIfArrayBufferIsDetached(SloppyTNode<Context> context,
TNode<JSArrayBuffer> array_buffer,
const char* method_name);
@@ -3301,12 +3315,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* IsPromiseHookEnabledOrHasAsyncEventDelegate();
Node* IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate();
- // Helpers for StackFrame markers.
- Node* MarkerIsFrameType(Node* marker_or_function,
- StackFrame::Type frame_type);
- Node* MarkerIsNotFrameType(Node* marker_or_function,
- StackFrame::Type frame_type);
-
// for..in helpers
void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map,
Label* if_fast, Label* if_slow);
@@ -3589,9 +3597,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
private:
// Low-level accessors for Descriptor arrays.
- TNode<MaybeObject> LoadDescriptorArrayElement(TNode<DescriptorArray> object,
- Node* index,
- int additional_offset = 0);
+ template <typename T>
+ TNode<T> LoadDescriptorArrayElement(TNode<DescriptorArray> object,
+ TNode<IntPtrT> index,
+ int additional_offset);
};
class V8_EXPORT_PRIVATE CodeStubArguments {
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 5197dd3a2f..906eb0f0ca 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -15,8 +15,10 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/pending-optimization-table.h"
#include "src/codegen/unoptimized-compilation-info.h"
#include "src/common/globals.h"
+#include "src/common/message-template.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler/pipeline.h"
@@ -24,7 +26,6 @@
#include "src/debug/liveedit.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/heap-inl.h"
@@ -319,6 +320,8 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
counters->turbofan_optimize_total_foreground()->AddSample(
static_cast<int>(time_foreground.InMicroseconds()));
}
+ counters->turbofan_ticks()->AddSample(static_cast<int>(
+ compilation_info()->tick_counter().CurrentTicks() / 1000));
}
}
@@ -593,6 +596,12 @@ MaybeHandle<SharedFunctionInfo> GenerateUnoptimizedCodeForToplevel(
return MaybeHandle<SharedFunctionInfo>();
}
+ if (FLAG_stress_lazy_source_positions) {
+ // Collect source positions immediately to try and flush out bytecode
+ // mismatches.
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
+ }
+
if (shared_info.is_identical_to(top_level)) {
// Ensure that the top level function is retained.
*is_compiled_scope = shared_info->is_compiled_scope();
@@ -797,18 +806,10 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
- // If code was pending optimization for testing, delete remove the strong root
- // that was preventing the bytecode from being flushed between marking and
- // optimization.
- if (!isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) {
- Handle<ObjectHashTable> table =
- handle(ObjectHashTable::cast(
- isolate->heap()->pending_optimize_for_test_bytecode()),
- isolate);
- bool was_present;
- table = table->Remove(isolate, table, handle(function->shared(), isolate),
- &was_present);
- isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+  // If code was pending optimization for testing, remove the entry
+  // from the table that was preventing the bytecode from being flushed.
+ if (V8_UNLIKELY(FLAG_testing_d8_test_runner)) {
+ PendingOptimizationTable::FunctionWasOptimized(isolate, function);
}
Handle<Code> cached_code;
@@ -1346,6 +1347,13 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
DCHECK(!isolate->has_pending_exception());
*is_compiled_scope = shared_info->is_compiled_scope();
DCHECK(is_compiled_scope->is_compiled());
+
+ if (FLAG_stress_lazy_source_positions) {
+ // Collect source positions immediately to try and flush out bytecode
+ // mismatches.
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
+ }
+
return true;
}
@@ -1599,33 +1607,103 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
return result;
}
-bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate,
- Handle<Context> context,
- Handle<String> source) {
+// Check whether embedder allows code generation in this context.
+// (via v8::Isolate::SetAllowCodeGenerationFromStringsCallback)
+bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle<Context> context,
+ Handle<String> source) {
DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate));
- // Check with callback if set.
+ DCHECK(isolate->allow_code_gen_callback());
+
+ // Callback set. Let it decide if code generation is allowed.
+ VMState<EXTERNAL> state(isolate);
+ RuntimeCallTimerScope timer(
+ isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
AllowCodeGenerationFromStringsCallback callback =
isolate->allow_code_gen_callback();
- if (callback == nullptr) {
- // No callback set and code generation disallowed.
- return false;
- } else {
- // Callback set. Let it decide if code generation is allowed.
- VMState<EXTERNAL> state(isolate);
- return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source));
+ return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source));
+}
+
+// Check whether embedder allows code generation in this context.
+// (via v8::Isolate::SetModifyCodeGenerationFromStringsCallback)
+bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle<Context> context,
+ Handle<i::Object>* source) {
+ DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate));
+ DCHECK(isolate->modify_code_gen_callback());
+ DCHECK(source);
+
+ // Callback set. Run it, and use the return value as the source, or block
+ // compilation if the callback returns an empty result.
+ VMState<EXTERNAL> state(isolate);
+ ModifyCodeGenerationFromStringsCallback modify_callback =
+ isolate->modify_code_gen_callback();
+ RuntimeCallTimerScope timer(
+ isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
+ MaybeLocal<v8::String> modified_source =
+ modify_callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(*source));
+ if (modified_source.IsEmpty()) return false;
+
+ // Use the new source (which might be the same as the old source) and return.
+ *source = Utils::OpenHandle(*modified_source.ToLocalChecked(), false);
+ return true;
+}
+
+// Run Embedder-mandated checks before generating code from a string.
+//
+// Returns a string to be used for compilation, or a flag that an object type
+// was encountered that is neither a string, nor something the embedder knows
+// how to handle.
+//
+// Returns: (assuming: std::tie(source, unknown_object))
+// - !source.is_null(): compilation allowed, source contains the source string.
+// - unknown_object is true: compilation allowed, but we don't know how to
+// deal with source_object.
+// - source.is_null() && !unknown_object: compilation should be blocked.
+//
+// - !source.is_null() and unknown_object can't be true at the same time.
+std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
+ Isolate* isolate, Handle<Context> context,
+ Handle<i::Object> source_object) {
+ Handle<String> source;
+ if (source_object->IsString()) source = Handle<String>::cast(source_object);
+
+ // Check if the context unconditionally allows code gen from strings.
+ // allow_code_gen_from_strings can be many things, so we'll always check
+ // against the 'false' literal, so that e.g. undefined and 'true' are treated
+ // the same.
+ if (!context->allow_code_gen_from_strings().IsFalse(isolate)) {
+ return {source, !source_object->IsString()};
+ }
+
+ // Check if the context allows code generation for this string.
+ // allow_code_gen_callback only allows proper strings.
+ // (I.e., let allow_code_gen_callback decide, if it has been set.)
+ if (isolate->allow_code_gen_callback()) {
+ if (source_object->IsString() &&
+ CodeGenerationFromStringsAllowed(isolate, context, source)) {
+ return {source, !source_object->IsString()};
+ }
+ }
+
+ // Check if the context wants to block or modify this source object.
+ // Double-check that we really have a string now.
+ // (Let modify_code_gen_callback decide, if it's been set.)
+ if (isolate->modify_code_gen_callback()) {
+ if (ModifyCodeGenerationFromStrings(isolate, context, &source_object) &&
+ source_object->IsString())
+ return {Handle<String>::cast(source_object), false};
}
+
+ return {MaybeHandle<String>(), !source_object->IsString()};
}
-MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
- Handle<Context> context, Handle<String> source,
+MaybeHandle<JSFunction> Compiler::GetFunctionFromValidatedString(
+ Handle<Context> context, MaybeHandle<String> source,
ParseRestriction restriction, int parameters_end_pos) {
Isolate* const isolate = context->GetIsolate();
Handle<Context> native_context(context->native_context(), isolate);
- // Check if native context allows code generation from
- // strings. Throw an exception if it doesn't.
- if (native_context->allow_code_gen_from_strings().IsFalse(isolate) &&
- !CodeGenerationFromStringsAllowed(isolate, native_context, source)) {
+ // Raise an EvalError if we did not receive a string.
+ if (source.is_null()) {
Handle<Object> error_message =
native_context->ErrorMessageForCodeGenerationFromStrings();
THROW_NEW_ERROR(
@@ -1639,9 +1717,20 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
int eval_position = kNoSourcePosition;
Handle<SharedFunctionInfo> outer_info(
native_context->empty_function().shared(), isolate);
- return Compiler::GetFunctionFromEval(
- source, outer_info, native_context, LanguageMode::kSloppy, restriction,
- parameters_end_pos, eval_scope_position, eval_position);
+ return Compiler::GetFunctionFromEval(source.ToHandleChecked(), outer_info,
+ native_context, LanguageMode::kSloppy,
+ restriction, parameters_end_pos,
+ eval_scope_position, eval_position);
+}
+
+MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
+ Handle<Context> context, Handle<Object> source,
+ ParseRestriction restriction, int parameters_end_pos) {
+ Isolate* const isolate = context->GetIsolate();
+ Handle<Context> native_context(context->native_context(), isolate);
+ return GetFunctionFromValidatedString(
+ context, ValidateDynamicCompilationSource(isolate, context, source).first,
+ restriction, parameters_end_pos);
}
namespace {
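As a reading aid for the validation flow introduced above, here is a minimal standalone sketch of how a caller can interpret the (source, unknown_object) pair returned by ValidateDynamicCompilationSource. It is plain C++17 using std::optional<std::string> in place of MaybeHandle<String>; the names and the main() driver are illustrative only, not part of V8's API.

#include <iostream>
#include <optional>
#include <string>
#include <utility>

// Stand-in for the pair returned by Compiler::ValidateDynamicCompilationSource:
//  - first:  the validated source, if compilation may proceed with a string.
//  - second: true when compilation is allowed but the object is not a string
//            (the embedder must handle that object itself).
using ValidationResult = std::pair<std::optional<std::string>, bool>;

void HandleEvalArgument(const ValidationResult& result) {
  const auto& [source, unknown_object] = result;
  if (source.has_value()) {
    std::cout << "compile as eval: " << *source << "\n";
  } else if (unknown_object) {
    std::cout << "allowed, but the embedder must handle this non-string\n";
  } else {
    std::cout << "blocked: raise an EvalError\n";  // GetFunctionFromValidatedString path
  }
}

int main() {
  HandleEvalArgument({std::string("1 + 1"), false});  // plain string source
  HandleEvalArgument({std::nullopt, true});           // embedder-specific object
  HandleEvalArgument({std::nullopt, false});          // compilation blocked
}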
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index a598706373..836f738123 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -132,17 +132,22 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
v8::ScriptCompiler::CompileOptions compile_options,
v8::ScriptCompiler::NoCacheReason no_cache_reason);
- // Returns true if the embedder permits compiling the given source string in
- // the given context.
- static bool CodeGenerationFromStringsAllowed(Isolate* isolate,
- Handle<Context> context,
- Handle<String> source);
-
// Create a (bound) function for a String source within a context for eval.
V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> GetFunctionFromString(
- Handle<Context> context, Handle<String> source,
+ Handle<Context> context, Handle<i::Object> source,
ParseRestriction restriction, int parameters_end_pos);
+ // Decompose GetFunctionFromString into two functions, to allow callers to
+ // deal separately with the case of an object not handled by the embedder.
+ V8_WARN_UNUSED_RESULT static std::pair<MaybeHandle<String>, bool>
+ ValidateDynamicCompilationSource(Isolate* isolate, Handle<Context> context,
+ Handle<i::Object> source_object);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction>
+ GetFunctionFromValidatedString(Handle<Context> context,
+ MaybeHandle<String> source,
+ ParseRestriction restriction,
+ int parameters_end_pos);
+
// Create a shared function info object for a String source.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
Isolate* isolate, Handle<String> source,
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 613a142f24..6816c5b7ad 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/codegen/constant-pool.h"
+#include "src/codegen/assembler-arch.h"
#include "src/codegen/assembler-inl.h"
namespace v8 {
@@ -210,5 +211,253 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
#endif // defined(V8_TARGET_ARCH_PPC)
+#if defined(V8_TARGET_ARCH_ARM64)
+
+// Constant Pool.
+
+ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
+ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }
+
+RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
+ RelocInfo::Mode rmode) {
+ ConstantPoolKey key(data, rmode);
+ CHECK(key.is_value32());
+ return RecordKey(std::move(key), assm_->pc_offset());
+}
+
+RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
+ RelocInfo::Mode rmode) {
+ ConstantPoolKey key(data, rmode);
+ CHECK(!key.is_value32());
+ return RecordKey(std::move(key), assm_->pc_offset());
+}
+
+RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
+ RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
+ if (write_reloc_info == RelocInfoStatus::kMustRecord) {
+ if (key.is_value32()) {
+ if (entry32_count_ == 0) first_use_32_ = offset;
+ ++entry32_count_;
+ } else {
+ if (entry64_count_ == 0) first_use_64_ = offset;
+ ++entry64_count_;
+ }
+ }
+ entries_.insert(std::make_pair(key, offset));
+
+ if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
+ // Request constant pool emission after the next instruction.
+ SetNextCheckIn(1);
+ }
+
+ return write_reloc_info;
+}
+
+RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
+ const ConstantPoolKey& key) {
+ if (key.AllowsDeduplication()) {
+ auto existing = entries_.find(key);
+ if (existing != entries_.end()) {
+ return RelocInfoStatus::kMustOmitForDuplicate;
+ }
+ }
+ return RelocInfoStatus::kMustRecord;
+}
+
+void ConstantPool::EmitAndClear(Jump require_jump) {
+ DCHECK(!IsBlocked());
+ // Prevent recursive pool emission.
+ Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
+ Alignment require_alignment =
+ IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
+ int size = ComputeSize(require_jump, require_alignment);
+ Label size_check;
+ assm_->bind(&size_check);
+ assm_->RecordConstPool(size);
+
+ // Emit the constant pool. It is preceded by an optional branch if
+ // {require_jump} and a header which will:
+ // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally
+ // flowing into the constant pool.
+ // 3) Align the 64-bit pool entries to 64 bits.
+ // TODO(all): Make the alignment part less fragile. Currently code is
+ // allocated as a byte array so there are no guarantees the alignment will
+ // be preserved on compaction. It works for now because allocation seems to
+ // be 64-bit aligned.
+
+ Label after_pool;
+ if (require_jump == Jump::kRequired) assm_->b(&after_pool);
+
+ assm_->RecordComment("[ Constant Pool");
+ EmitPrologue(require_alignment);
+ if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
+ EmitEntries();
+ assm_->RecordComment("]");
+
+ if (after_pool.is_linked()) assm_->bind(&after_pool);
+
+ DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
+ Clear();
+}
+
+void ConstantPool::Clear() {
+ entries_.clear();
+ first_use_32_ = -1;
+ first_use_64_ = -1;
+ entry32_count_ = 0;
+ entry64_count_ = 0;
+ next_check_ = 0;
+}
+
+void ConstantPool::StartBlock() {
+ if (blocked_nesting_ == 0) {
+ // Prevent constant pool checks from happening by setting the next check to
+ // the biggest possible offset.
+ next_check_ = kMaxInt;
+ }
+ ++blocked_nesting_;
+}
+
+void ConstantPool::EndBlock() {
+ --blocked_nesting_;
+ if (blocked_nesting_ == 0) {
+ DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
+ // Make sure a check happens quickly after getting unblocked.
+ next_check_ = 0;
+ }
+}
+
+bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }
+
+void ConstantPool::SetNextCheckIn(size_t instructions) {
+ next_check_ =
+ assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
+}
+
+void ConstantPool::EmitEntries() {
+ for (auto iter = entries_.begin(); iter != entries_.end();) {
+ DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
+ auto range = entries_.equal_range(iter->first);
+ bool shared = iter->first.AllowsDeduplication();
+ for (auto it = range.first; it != range.second; ++it) {
+ SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
+ if (!shared) Emit(it->first);
+ }
+ if (shared) Emit(iter->first);
+ iter = range.second;
+ }
+}
+
+void ConstantPool::Emit(const ConstantPoolKey& key) {
+ if (key.is_value32()) {
+ assm_->dd(key.value32());
+ } else {
+ assm_->dq(key.value64());
+ }
+}
+
+bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
+ if (IsEmpty()) return false;
+ if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
+ return true;
+ }
+ // We compute {dist32/64}, i.e. the distance from the first instruction
+ // accessing a 32bit/64bit entry in the constant pool to any of the
+ // 32bit/64bit constant pool entries, respectively. This is required because
+ // we do not guarantee that entries are emitted in order of reference, i.e. it
+ // is possible that the entry with the earliest reference is emitted last.
+ // The constant pool should be emitted if either of the following is true:
+ // (A) {dist32/64} will be out of range at the next check in.
+ // (B) Emission can be done behind an unconditional branch and {dist32/64}
+ // exceeds {kOpportunityDist*}.
+ // (C) {dist32/64} exceeds the desired approximate distance to the pool.
+ int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
+ size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
+ size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
+ if (Entry64Count() != 0) {
+ // The 64-bit constants are always emitted before the 32-bit constants, so
+ // we subtract the size of the 32-bit constants to compute {pool_end_64}.
+ size_t dist64 = pool_end_64 - first_use_64_;
+ bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
+ bool opportune_emission_without_jump =
+ require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
+ bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
+ if (next_check_too_late || opportune_emission_without_jump ||
+ approximate_distance_exceeded) {
+ return true;
+ }
+ }
+ if (Entry32Count() != 0) {
+ size_t dist32 = pool_end_32 - first_use_32_;
+ bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
+ bool opportune_emission_without_jump =
+ require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
+ bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
+ if (next_check_too_late || opportune_emission_without_jump ||
+ approximate_distance_exceeded) {
+ return true;
+ }
+ }
+ return false;
+}
+
+int ConstantPool::ComputeSize(Jump require_jump,
+ Alignment require_alignment) const {
+ int size_up_to_marker = PrologueSize(require_jump);
+ int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
+ size_t size_after_marker =
+ Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
+ return size_up_to_marker + static_cast<int>(size_after_marker);
+}
+
+Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
+ int pc_offset) const {
+ int size_up_to_marker = PrologueSize(require_jump);
+ if (Entry64Count() != 0 &&
+ !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
+ return Alignment::kRequired;
+ }
+ return Alignment::kOmitted;
+}
+
+bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
+ // Check that all entries are in range if the pool is emitted at {pc_offset}.
+ // This ignores kPcLoadDelta (conservatively, since all offsets are positive),
+ // and over-estimates the last entry's address with the pool's end.
+ Alignment require_alignment =
+ IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
+ size_t pool_end_32 =
+ pc_offset + ComputeSize(Jump::kRequired, require_alignment);
+ size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
+ bool entries_in_range_32 =
+ Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
+ bool entries_in_range_64 =
+ Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
+ return entries_in_range_32 && entries_in_range_64;
+}
+
+ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
+ : pool_(&assm->constpool_) {
+ pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
+ pool_->StartBlock();
+}
+
+ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
+ : pool_(&assm->constpool_) {
+ DCHECK_EQ(check, PoolEmissionCheck::kSkip);
+ pool_->StartBlock();
+}
+
+ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }
+
+void ConstantPool::MaybeCheck() {
+ if (assm_->pc_offset() >= next_check_) {
+ Check(Emission::kIfNeeded, Jump::kRequired);
+ }
+}
+
+#endif // defined(V8_TARGET_ARCH_ARM64)
+
} // namespace internal
} // namespace v8
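To make the emission heuristic in ShouldEmitNow() above easier to follow, the following standalone sketch models its 64-bit branch in plain C++. The distance constants are made-up placeholders, not V8's real limits (those live in ConstantPool's static members).

#include <cstddef>
#include <iostream>

// Placeholder distances, for illustration only.
constexpr size_t kCheckInterval = 128;
constexpr size_t kMaxDistToPool64 = size_t{1} << 21;     // assumed
constexpr size_t kApproxDistToPool64 = 64 * 1024;        // assumed
constexpr size_t kOpportunityDistToPool64 = 32 * 1024;   // assumed

// Mirrors the 64-bit branch of ConstantPool::ShouldEmitNow():
// emit if (A) the next check would be too late, (B) a jump can be omitted and
// the opportunity distance is exceeded, or (C) the approximate distance is hit.
bool ShouldEmitNow64(size_t pool_end_64, size_t first_use_64, bool jump_omitted) {
  size_t dist64 = pool_end_64 - first_use_64;
  bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
  bool opportune_emission_without_jump =
      jump_omitted && dist64 >= kOpportunityDistToPool64;
  bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
  return next_check_too_late || opportune_emission_without_jump ||
         approximate_distance_exceeded;
}

int main() {
  std::cout << ShouldEmitNow64(70000, 0, false) << "\n";  // 1: approx distance exceeded
  std::cout << ShouldEmitNow64(1000, 0, false) << "\n";   // 0: pool can still wait
}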
diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index 4399f6fc1f..d07452336b 100644
--- a/deps/v8/src/codegen/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+class Instruction;
+
// -----------------------------------------------------------------------------
// Constant pool support
@@ -136,8 +138,9 @@ class ConstantPoolBuilder {
inline Label* EmittedPosition() { return &emitted_label_; }
private:
- ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry,
- ConstantPoolEntry::Type type);
+ ConstantPoolEntry::Access AddEntry(
+ ConstantPoolEntry& entry, // NOLINT(runtime/references)
+ ConstantPoolEntry::Type type);
void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type);
@@ -161,6 +164,189 @@ class ConstantPoolBuilder {
#endif // defined(V8_TARGET_ARCH_PPC)
+#if defined(V8_TARGET_ARCH_ARM64)
+
+class ConstantPoolKey {
+ public:
+ explicit ConstantPoolKey(uint64_t value,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : is_value32_(false), value64_(value), rmode_(rmode) {}
+
+ explicit ConstantPoolKey(uint32_t value,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : is_value32_(true), value32_(value), rmode_(rmode) {}
+
+ uint64_t value64() const {
+ CHECK(!is_value32_);
+ return value64_;
+ }
+ uint32_t value32() const {
+ CHECK(is_value32_);
+ return value32_;
+ }
+
+ bool is_value32() const { return is_value32_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
+ bool AllowsDeduplication() const {
+ DCHECK(rmode_ != RelocInfo::CONST_POOL &&
+ rmode_ != RelocInfo::VENEER_POOL &&
+ rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET &&
+ rmode_ != RelocInfo::DEOPT_INLINING_ID &&
+ rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID);
+ // CODE_TARGETs can be shared because they aren't patched anymore,
+ // and we make sure we emit only one reloc info for them (thus delta
+ // patching will apply the delta only once). At the moment, we do not dedup
+ // code targets if they are wrapped in a heap object request (value == 0).
+ bool is_sharable_code_target =
+ rmode_ == RelocInfo::CODE_TARGET &&
+ (is_value32() ? (value32() != 0) : (value64() != 0));
+ bool is_sharable_embedded_object = RelocInfo::IsEmbeddedObjectMode(rmode_);
+ return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target ||
+ is_sharable_embedded_object;
+ }
+
+ private:
+ bool is_value32_;
+ union {
+ uint64_t value64_;
+ uint32_t value32_;
+ };
+ RelocInfo::Mode rmode_;
+};
+
+// Order for pool entries. 64-bit entries go first.
+inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) {
+ if (a.is_value32() < b.is_value32()) return true;
+ if (a.is_value32() > b.is_value32()) return false;
+ if (a.rmode() < b.rmode()) return true;
+ if (a.rmode() > b.rmode()) return false;
+ if (a.is_value32()) return a.value32() < b.value32();
+ return a.value64() < b.value64();
+}
+
+inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) {
+ if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) {
+ return false;
+ }
+ if (a.is_value32()) return a.value32() == b.value32();
+ return a.value64() == b.value64();
+}
+
+// Constant pool generation
+enum class Jump { kOmitted, kRequired };
+enum class Emission { kIfNeeded, kForced };
+enum class Alignment { kOmitted, kRequired };
+enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate };
+enum class PoolEmissionCheck { kSkip };
+
+// Pools are emitted in the instruction stream, preferably after unconditional
+// jumps or after returns from functions (in dead code locations).
+// If a long code sequence does not contain unconditional jumps, it is
+// necessary to emit the constant pool before the pool gets too far from the
+// location it is accessed from. In this case, we emit a jump over the emitted
+// constant pool.
+// Constants in the pool may be addresses of functions that get relocated;
+// if so, a relocation info entry is associated with the constant pool entry.
+class ConstantPool {
+ public:
+ explicit ConstantPool(Assembler* assm);
+ ~ConstantPool();
+
+ // Records a constant pool entry; the returned status says whether RelocInfo
+ // must still be written for it or can be omitted as a duplicate.
+ RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode);
+ RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode);
+
+ size_t Entry32Count() const { return entry32_count_; }
+ size_t Entry64Count() const { return entry64_count_; }
+ bool IsEmpty() const { return entries_.empty(); }
+ // Check if pool will be out of range at {pc_offset}.
+ bool IsInImmRangeIfEmittedAt(int pc_offset);
+ // Size in bytes of the constant pool. Depending on parameters, the size will
+ // include the branch over the pool and alignment padding.
+ int ComputeSize(Jump require_jump, Alignment require_alignment) const;
+
+ // Emit the pool at the current pc with a branch over the pool if requested.
+ void EmitAndClear(Jump require);
+ bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const;
+ V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump,
+ size_t margin = 0);
+
+ V8_EXPORT_PRIVATE void MaybeCheck();
+ void Clear();
+
+ // Constant pool emission can be blocked temporarily.
+ bool IsBlocked() const;
+
+ // Repeatedly checking whether the constant pool should be emitted is
+ // expensive; only check once a number of instructions have been generated.
+ void SetNextCheckIn(size_t instructions);
+
+ // Class for scoping postponing the constant pool generation.
+ class V8_EXPORT_PRIVATE BlockScope {
+ public:
+ // BlockScope immediately emits the pool if necessary to ensure that
+ // during the block scope at least {margin} bytes can be emitted without
+ // pool emission becoming necessary.
+ explicit BlockScope(Assembler* pool, size_t margin = 0);
+ BlockScope(Assembler* pool, PoolEmissionCheck);
+ ~BlockScope();
+
+ private:
+ ConstantPool* pool_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope);
+ };
+
+ // Hard limit on the distance to the constant pool; must not be exceeded.
+ static const size_t kMaxDistToPool32;
+ static const size_t kMaxDistToPool64;
+ // Approximate distance where the pool should be emitted.
+ static const size_t kApproxDistToPool32;
+ V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64;
+ // Approximate distance where the pool may be emitted if
+ // no jump is required (due to a recent unconditional jump).
+ static const size_t kOpportunityDistToPool32;
+ static const size_t kOpportunityDistToPool64;
+ // PC distance between constant pool checks.
+ V8_EXPORT_PRIVATE static const size_t kCheckInterval;
+ // Number of entries in the pool which trigger a check.
+ static const size_t kApproxMaxEntryCount;
+
+ private:
+ void StartBlock();
+ void EndBlock();
+
+ void EmitEntries();
+ void EmitPrologue(Alignment require_alignment);
+ int PrologueSize(Jump require_jump) const;
+ RelocInfoStatus RecordKey(ConstantPoolKey key, int offset);
+ RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key);
+ void Emit(const ConstantPoolKey& key);
+ void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset,
+ const ConstantPoolKey& key);
+ Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump,
+ int pc_offset) const;
+
+ Assembler* assm_;
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_use_32_ = -1;
+ int first_use_64_ = -1;
+ // We do not sort in insertion order, but since we do not insert
+ // addresses (for heap objects we insert an index which is created in
+ // increasing order), the order is deterministic. We map each entry to the
+ // pc offset of the load. We use a multimap because we need to record the
+ // pc offset of each load of the same constant so that the immediate of the
+ // loads can be back-patched when the pool is emitted.
+ std::multimap<ConstantPoolKey, int> entries_;
+ size_t entry32_count_ = 0;
+ size_t entry64_count_ = 0;
+ int next_check_ = 0;
+ int blocked_nesting_ = 0;
+};
+
+#endif // defined(V8_TARGET_ARCH_ARM64)
+
} // namespace internal
} // namespace v8
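The BlockScope declared above follows the usual RAII pattern for temporarily suppressing pool checks. A minimal standalone model of that pattern is sketched below in plain C++; ToyPool and its members are illustrative stand-ins, not the real ConstantPool interface.

#include <cassert>
#include <climits>

// Toy stand-in for the blocking logic in ConstantPool::StartBlock()/EndBlock().
class ToyPool {
 public:
  void StartBlock() {
    if (blocked_nesting_++ == 0) next_check_ = INT_MAX;  // suppress checks
  }
  void EndBlock() {
    if (--blocked_nesting_ == 0) next_check_ = 0;  // re-check soon after unblocking
  }
  bool IsBlocked() const { return blocked_nesting_ > 0; }

  // RAII wrapper analogous to ConstantPool::BlockScope.
  class BlockScope {
   public:
    explicit BlockScope(ToyPool* pool) : pool_(pool) { pool_->StartBlock(); }
    ~BlockScope() { pool_->EndBlock(); }
   private:
    ToyPool* pool_;
  };

 private:
  int blocked_nesting_ = 0;
  int next_check_ = 0;
};

int main() {
  ToyPool pool;
  {
    ToyPool::BlockScope scope(&pool);  // pool checks blocked within this scope
    assert(pool.IsBlocked());
  }
  assert(!pool.IsBlocked());
}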
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index b2f792e339..dae9992c57 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -14,6 +14,7 @@ namespace internal {
// CPU feature flags.
enum CpuFeature {
// x86
+ SSE4_2,
SSE4_1,
SSSE3,
SSE3,
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 5538f361f0..c077407931 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -26,31 +26,11 @@
#include "src/logging/log.h"
#include "src/numbers/math-random.h"
#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-macro-assembler-arch.h"
#include "src/regexp/regexp-stack.h"
#include "src/strings/string-search.h"
#include "src/wasm/wasm-external-refs.h"
-// Include native regexp-macro-assembler.
-#if V8_TARGET_ARCH_IA32
-#include "src/regexp/ia32/regexp-macro-assembler-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/regexp/x64/regexp-macro-assembler-x64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/regexp/arm64/regexp-macro-assembler-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/regexp/arm/regexp-macro-assembler-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/regexp/ppc/regexp-macro-assembler-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/regexp/mips/regexp-macro-assembler-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT
-#else // Unknown architecture.
-#error "Unknown architecture."
-#endif // Target architecture.
-
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#endif // V8_INTL_SUPPORT
@@ -671,6 +651,15 @@ static Address LexicographicCompareWrapper(Isolate* isolate, Address smi_x,
FUNCTION_REFERENCE(smi_lexicographic_compare_function,
LexicographicCompareWrapper)
+FUNCTION_REFERENCE(mutable_big_int_absolute_add_and_canonicalize_function,
+ MutableBigInt_AbsoluteAddAndCanonicalize)
+
+FUNCTION_REFERENCE(mutable_big_int_absolute_compare_function,
+ MutableBigInt_AbsoluteCompare)
+
+FUNCTION_REFERENCE(mutable_big_int_absolute_sub_and_canonicalize_function,
+ MutableBigInt_AbsoluteSubAndCanonicalize)
+
FUNCTION_REFERENCE(check_object_type, CheckObjectType)
#ifdef V8_INTL_SUPPORT
@@ -786,6 +775,12 @@ ExternalReference ExternalReference::fast_c_call_caller_pc_address(
isolate->isolate_data()->fast_c_call_caller_pc_address());
}
+ExternalReference ExternalReference::stack_is_iterable_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->isolate_data()->stack_is_iterable_address());
+}
+
FUNCTION_REFERENCE(call_enqueue_microtask_function,
MicrotaskQueue::CallEnqueueMicrotask)
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 4c83a9b33a..b663ae1621 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -72,6 +72,7 @@ class StatsCounter;
"IsolateData::fast_c_call_caller_fp_address") \
V(fast_c_call_caller_pc_address, \
"IsolateData::fast_c_call_caller_pc_address") \
+ V(stack_is_iterable_address, "IsolateData::stack_is_iterable_address") \
V(address_of_regexp_stack_limit, "RegExpStack::limit_address()") \
V(address_of_regexp_stack_memory_address, "RegExpStack::memory_address()") \
V(address_of_regexp_stack_memory_size, "RegExpStack::memory_size()") \
@@ -149,6 +150,12 @@ class StatsCounter;
V(libc_memmove_function, "libc_memmove") \
V(libc_memset_function, "libc_memset") \
V(mod_two_doubles_operation, "mod_two_doubles") \
+ V(mutable_big_int_absolute_add_and_canonicalize_function, \
+ "MutableBigInt_AbsoluteAddAndCanonicalize") \
+ V(mutable_big_int_absolute_compare_function, \
+ "MutableBigInt_AbsoluteCompare") \
+ V(mutable_big_int_absolute_sub_and_canonicalize_function, \
+ "MutableBigInt_AbsoluteSubAndCanonicalize") \
V(new_deoptimizer_function, "Deoptimizer::New()") \
V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \
V(printf_function, "printf") \
diff --git a/deps/v8/src/codegen/handler-table.cc b/deps/v8/src/codegen/handler-table.cc
index 12a05e1fba..4f94746ea5 100644
--- a/deps/v8/src/codegen/handler-table.cc
+++ b/deps/v8/src/codegen/handler-table.cc
@@ -15,31 +15,41 @@ namespace internal {
HandlerTable::HandlerTable(Code code)
: HandlerTable(code.InstructionStart() + code.handler_table_offset(),
- code.handler_table_size()) {}
+ code.handler_table_size(), kReturnAddressBasedEncoding) {}
HandlerTable::HandlerTable(BytecodeArray bytecode_array)
: HandlerTable(bytecode_array.handler_table()) {}
HandlerTable::HandlerTable(ByteArray byte_array)
- : number_of_entries_(byte_array.length() / kRangeEntrySize /
- sizeof(int32_t)),
-#ifdef DEBUG
- mode_(kRangeBasedEncoding),
-#endif
- raw_encoded_data_(
- reinterpret_cast<Address>(byte_array.GetDataStartAddress())) {
- DCHECK_EQ(0, byte_array.length() % (kRangeEntrySize * sizeof(int32_t)));
-}
+ : HandlerTable(reinterpret_cast<Address>(byte_array.GetDataStartAddress()),
+ byte_array.length(), kRangeBasedEncoding) {}
-HandlerTable::HandlerTable(Address handler_table, int handler_table_size)
- : number_of_entries_(handler_table_size / kReturnEntrySize /
+HandlerTable::HandlerTable(Address handler_table, int handler_table_size,
+ EncodingMode encoding_mode)
+ : number_of_entries_(handler_table_size / EntrySizeFromMode(encoding_mode) /
sizeof(int32_t)),
#ifdef DEBUG
- mode_(kReturnAddressBasedEncoding),
+ mode_(encoding_mode),
#endif
raw_encoded_data_(handler_table) {
+ // Check padding.
static_assert(4 < kReturnEntrySize * sizeof(int32_t), "allowed padding");
- DCHECK_GE(4, handler_table_size % (kReturnEntrySize * sizeof(int32_t)));
+ // For return address encoding, maximum padding is 4; otherwise, there should
+ // be no padding.
+ DCHECK_GE(kReturnAddressBasedEncoding == encoding_mode ? 4 : 0,
+ handler_table_size %
+ (EntrySizeFromMode(encoding_mode) * sizeof(int32_t)));
+}
+
+// static
+int HandlerTable::EntrySizeFromMode(EncodingMode mode) {
+ switch (mode) {
+ case kReturnAddressBasedEncoding:
+ return kReturnEntrySize;
+ case kRangeBasedEncoding:
+ return kRangeEntrySize;
+ }
+ UNREACHABLE();
}
int HandlerTable::GetRangeStart(int index) const {
diff --git a/deps/v8/src/codegen/handler-table.h b/deps/v8/src/codegen/handler-table.h
index eaa062873b..362412525d 100644
--- a/deps/v8/src/codegen/handler-table.h
+++ b/deps/v8/src/codegen/handler-table.h
@@ -45,11 +45,14 @@ class V8_EXPORT_PRIVATE HandlerTable {
// async/await handling in the debugger can take place.
};
+ enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };
+
// Constructors for the various encodings.
explicit HandlerTable(Code code);
explicit HandlerTable(ByteArray byte_array);
explicit HandlerTable(BytecodeArray bytecode_array);
- explicit HandlerTable(Address handler_table, int handler_table_size);
+ HandlerTable(Address handler_table, int handler_table_size,
+ EncodingMode encoding_mode);
// Getters for handler table based on ranges.
int GetRangeStart(int index) const;
@@ -88,11 +91,12 @@ class V8_EXPORT_PRIVATE HandlerTable {
#endif
private:
- enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };
-
// Getters for handler table based on ranges.
CatchPrediction GetRangePrediction(int index) const;
+ // Gets entry size based on mode.
+ static int EntrySizeFromMode(EncodingMode mode);
+
// Getters for handler table based on return addresses.
int GetReturnOffset(int index) const;
int GetReturnHandler(int index) const;
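To make the two handler-table encodings concrete, here is a standalone sketch of how the entry size selected by EntrySizeFromMode() drives the entry count computed in the new constructor. The per-entry field counts below are illustrative placeholders, not necessarily the real kRangeEntrySize/kReturnEntrySize values.

#include <cassert>
#include <cstdint>

enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };

// Illustrative field counts per entry (assumed values for this sketch).
constexpr int kToyRangeEntrySize = 4;   // e.g. start, end, handler, data
constexpr int kToyReturnEntrySize = 2;  // e.g. return offset, handler

int EntrySizeFromMode(EncodingMode mode) {
  switch (mode) {
    case kReturnAddressBasedEncoding:
      return kToyReturnEntrySize;
    case kRangeBasedEncoding:
      return kToyRangeEntrySize;
  }
  return 0;  // unreachable
}

// Mirrors the arithmetic in the new HandlerTable constructor:
// number_of_entries_ = table_size / entry_size / sizeof(int32_t).
int NumberOfEntries(int table_size_in_bytes, EncodingMode mode) {
  return table_size_in_bytes / EntrySizeFromMode(mode) /
         static_cast<int>(sizeof(int32_t));
}

int main() {
  // With these toy sizes, a 32-byte table holds 2 range-based entries
  // or 4 return-address-based entries.
  assert(NumberOfEntries(32, kRangeBasedEncoding) == 2);
  assert(NumberOfEntries(32, kReturnAddressBasedEncoding) == 4);
}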
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 99d38890e3..aefcab7299 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -756,6 +756,13 @@ void Assembler::cmpxchg8b(Operand dst) {
emit_operand(ecx, dst);
}
+void Assembler::mfence() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAE);
+ EMIT(0xF0);
+}
+
void Assembler::lfence() {
EnsureSpace ensure_space(this);
EMIT(0x0F);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index d2dcb0f348..2423f73bdb 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -542,6 +542,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cmpxchg8b(Operand dst);
// Memory Fence
+ void mfence();
void lfence();
void pause();
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 6a0be9386e..f6f0153e54 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -1887,20 +1887,24 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 4);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below (we use
// times_half_system_pointer_size instead of times_system_pointer_size since
// smis are already shifted by one).
- mov(builtin_pointer,
- Operand(kRootRegister, builtin_pointer, times_half_system_pointer_size,
+ mov(builtin_index,
+ Operand(kRootRegister, builtin_index, times_half_system_pointer_size,
IsolateData::builtin_entry_table_offset()));
- call(builtin_pointer);
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ call(builtin_index);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
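The ia32 change above folds the Smi untag into the scaled-index operand: scaling the still-tagged value by half a pointer size equals untagging first and scaling by a full pointer size. A standalone sketch of that address arithmetic follows; the root value, table offset, and helper names are illustrative assumptions.

#include <cstdint>
#include <iostream>

constexpr uint32_t kSystemPointerSize = 4;            // ia32
constexpr uint32_t kBuiltinEntryTableOffset = 0x100;  // illustrative only

// Address computed when the Smi-tagged index is scaled by half a pointer size,
// as the times_half_system_pointer_size operand does.
uint32_t EntryAddressFromSmi(uint32_t root, uint32_t builtin_index_smi) {
  return root + builtin_index_smi * (kSystemPointerSize / 2) +
         kBuiltinEntryTableOffset;
}

// Address computed after explicitly untagging and scaling by a full pointer.
uint32_t EntryAddressFromUntagged(uint32_t root, uint32_t builtin_index) {
  return root + builtin_index * kSystemPointerSize + kBuiltinEntryTableOffset;
}

int main() {
  uint32_t root = 0x1000;
  uint32_t index = 7;
  uint32_t smi = index << 1;  // Smi tag: value shifted left by one, tag bit 0
  std::cout << std::boolalpha
            << (EntryAddressFromSmi(root, smi) ==
                EntryAddressFromUntagged(root, index))
            << "\n";  // true: the two computations agree
}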
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 345ae815af..9b13e87447 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -87,7 +87,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Label* target) { call(target); }
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
- void CallBuiltinPointer(Register builtin_pointer) override;
+ // Load the entry address of the builtin given by the Smi index in
+ // |builtin_index| into the same register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index f8f874359b..5934c80a7d 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -252,6 +252,11 @@ void StringAtDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void StringAtAsStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
void StringSubstringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index d166b477d8..f6c1adfe47 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -74,6 +74,7 @@ namespace internal {
V(StoreTransition) \
V(StoreWithVector) \
V(StringAt) \
+ V(StringAtAsString) \
V(StringSubstring) \
V(TypeConversion) \
V(TypeConversionStackParameter) \
@@ -969,6 +970,17 @@ class StringAtDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(StringAtDescriptor, CallInterfaceDescriptor)
};
+class StringAtAsStringDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kPosition)
+ // TODO(turbofan): Return untagged value here.
+ DEFINE_RESULT_AND_PARAMETER_TYPES(
+ MachineType::TaggedPointer(), // result string
+ MachineType::AnyTagged(), // kReceiver
+ MachineType::IntPtr()) // kPosition
+ DECLARE_DESCRIPTOR(StringAtAsStringDescriptor, CallInterfaceDescriptor)
+};
+
class StringSubstringDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kString, kFrom, kTo)
diff --git a/deps/v8/src/codegen/label.h b/deps/v8/src/codegen/label.h
index 430958d190..f45f1e62d7 100644
--- a/deps/v8/src/codegen/label.h
+++ b/deps/v8/src/codegen/label.h
@@ -99,7 +99,7 @@ class Label {
friend class Assembler;
friend class Displacement;
- friend class RegExpMacroAssemblerIrregexp;
+ friend class RegExpBytecodeGenerator;
// Disallow copy construction and assignment, but allow move construction and
// move assignment on selected platforms (see above).
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index d6337aefb6..423da2fb65 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -39,6 +39,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/codegen/mips/assembler-mips-inl.h"
+#include "src/codegen/safepoint-table.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/heap-number-inl.h"
@@ -2211,7 +2212,7 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
emit(break_instr);
}
-void Assembler::stop(const char* msg, uint32_t code) {
+void Assembler::stop(uint32_t code) {
DCHECK_GT(code, kMaxWatchpointCode);
DCHECK_LE(code, kMaxStopCode);
#if V8_HOST_ARCH_MIPS
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 640e11cf1a..86a07ab06e 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -558,7 +558,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Break / Trap instructions.
void break_(uint32_t code, bool break_as_stop = false);
- void stop(const char* msg, uint32_t code = kMaxStopCode);
+ void stop(uint32_t code = kMaxStopCode);
void tge(Register rs, Register rt, uint16_t code);
void tgeu(Register rs, Register rt, uint16_t code);
void tlt(Register rs, Register rt, uint16_t code);
@@ -1478,11 +1478,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsAddImmediate(Instr instr);
static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
- static void UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
- int16_t& jic_offset);
- static void UnpackTargetAddressUnsigned(uint32_t address,
- uint32_t& lui_offset,
- uint32_t& jic_offset);
+ static void UnpackTargetAddress(
+ uint32_t address, int16_t& lui_offset, // NOLINT(runtime/references)
+ int16_t& jic_offset); // NOLINT(runtime/references)
+ static void UnpackTargetAddressUnsigned(
+ uint32_t address,
+ uint32_t& lui_offset, // NOLINT(runtime/references)
+ uint32_t& jic_offset); // NOLINT(runtime/references)
static bool IsAndImmediate(Instr instr);
static bool IsEmittedConstant(Instr instr);
@@ -1513,7 +1515,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src,
+ MemOperand& src, // NOLINT(runtime/references)
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 483b7e895b..79373c1b5b 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -189,7 +189,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Unaligned cell in write barrier");
+ stop();
bind(&ok);
}
@@ -3974,18 +3974,22 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, rs, rt, bd);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 4);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
- SmiUntag(builtin_pointer, builtin_pointer);
- Lsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2);
- lw(builtin_pointer,
- MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset()));
- Call(builtin_pointer);
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index, builtin_index);
+ Lsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2);
+ lw(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
@@ -4111,6 +4115,11 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
+void TurboAssembler::LoadAddress(Register dst, Label* target) {
+ uint32_t address = jump_address(target);
+ li(dst, address);
+}
+
void TurboAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -4694,15 +4703,15 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
- const char* msg = GetAbortReason(reason);
#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
RecordComment(msg);
#endif
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
- stop(msg);
+ stop();
return;
}
@@ -4938,7 +4947,7 @@ void MacroAssembler::AssertStackIsAligned() {
andi(scratch, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort re-entering here.
- stop("Unexpected stack alignment");
+ stop();
bind(&alignment_as_expected);
}
}
@@ -5352,7 +5361,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort possibly
// re-entering here.
- stop("Unexpected alignment in CallCFunction");
+ stop();
bind(&alignment_as_expected);
}
}
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index f394e01769..3dfc7bfbad 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -212,8 +212,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Label* target);
+ void LoadAddress(Register dst, Label* target);
- void CallBuiltinPointer(Register builtin_pointer) override;
+ // Load the entry address of the builtin given by the Smi index in
+ // |builtin_index| into the same register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination,
Register code_object) override {
@@ -841,9 +845,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
- bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt);
+ bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
+ OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
+ OffsetSize bits,
+ Register& scratch, // NOLINT(runtime/references)
+ const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index cb8e3dd7d1..801faf6306 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -38,6 +38,7 @@
#include "src/base/cpu.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
+#include "src/codegen/safepoint-table.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/heap-number-inl.h"
@@ -2344,7 +2345,7 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
emit(break_instr);
}
-void Assembler::stop(const char* msg, uint32_t code) {
+void Assembler::stop(uint32_t code) {
DCHECK_GT(code, kMaxWatchpointCode);
DCHECK_LE(code, kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index c7c027eef7..a22ddf0e7d 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -601,7 +601,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Break / Trap instructions.
void break_(uint32_t code, bool break_as_stop = false);
- void stop(const char* msg, uint32_t code = kMaxStopCode);
+ void stop(uint32_t code = kMaxStopCode);
void tge(Register rs, Register rt, uint16_t code);
void tgeu(Register rs, Register rt, uint16_t code);
void tlt(Register rs, Register rt, uint16_t code);
@@ -1560,7 +1560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src,
+ MemOperand& src, // NOLINT(runtime/references)
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 65c0b592eb..97e5af1fa8 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -187,7 +187,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Unaligned cell in write barrier");
+ stop();
bind(&ok);
}
@@ -4274,18 +4274,22 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, rs, rt, bd);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
- SmiUntag(builtin_pointer, builtin_pointer);
- Dlsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2);
- Ld(builtin_pointer,
- MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset()));
- Call(builtin_pointer);
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index, builtin_index);
+ Dlsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2);
+ Ld(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
@@ -4433,6 +4437,11 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
+void TurboAssembler::LoadAddress(Register dst, Label* target) {
+ uint64_t address = jump_address(target);
+ li(dst, address);
+}
+
void TurboAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5026,15 +5035,15 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
- const char* msg = GetAbortReason(reason);
#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
RecordComment(msg);
#endif
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
- stop(msg);
+ stop();
return;
}
@@ -5273,7 +5282,7 @@ void MacroAssembler::AssertStackIsAligned() {
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
}
// Don't use Check here, as it will call Runtime_Abort re-entering here.
- stop("Unexpected stack alignment");
+ stop();
bind(&alignment_as_expected);
}
}
@@ -5698,7 +5707,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
// Don't use Check here, as it will call Runtime_Abort possibly
// re-entering here.
- stop("Unexpected alignment in CallCFunction");
+ stop();
bind(&alignment_as_expected);
}
}
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index d0f9b7f5bc..eb62bec0e8 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -234,8 +234,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Label* target);
+ void LoadAddress(Register dst, Label* target);
- void CallBuiltinPointer(Register builtin_pointer) override;
+ // Load the entry address of the builtin given by the Smi index in
+ // |builtin_index| into the same register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination,
Register code_object) override {
@@ -845,9 +849,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
- bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt);
+ bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
+ OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
+ OffsetSize bits,
+ Register& scratch, // NOLINT(runtime/references)
+ const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 596d5c261e..f3582d868a 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -75,9 +75,15 @@ void OptimizedCompilationInfo::ConfigureFlags() {
break;
case Code::BYTECODE_HANDLER:
SetFlag(kCalledWithCodeStartRegister);
+ if (FLAG_turbo_splitting) {
+ MarkAsSplittingEnabled();
+ }
break;
case Code::BUILTIN:
case Code::STUB:
+ if (FLAG_turbo_splitting) {
+ MarkAsSplittingEnabled();
+ }
#if ENABLE_GDB_JIT_INTERFACE && DEBUG
MarkAsSourcePositionsEnabled();
#endif // ENABLE_GDB_JIT_INTERFACE && DEBUG
@@ -177,6 +183,8 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
return StackFrame::WASM_TO_JS;
case Code::WASM_INTERPRETER_ENTRY:
return StackFrame::WASM_INTERPRETER_ENTRY;
+ case Code::C_WASM_ENTRY:
+ return StackFrame::C_WASM_ENTRY;
default:
UNIMPLEMENTED();
return StackFrame::NONE;
@@ -206,7 +214,7 @@ bool OptimizedCompilationInfo::has_native_context() const {
return !closure().is_null() && !closure()->native_context().is_null();
}
-Context OptimizedCompilationInfo::native_context() const {
+NativeContext OptimizedCompilationInfo::native_context() const {
DCHECK(has_native_context());
return closure()->native_context();
}
@@ -234,6 +242,8 @@ void OptimizedCompilationInfo::SetTracingFlags(bool passes_filter) {
if (FLAG_trace_turbo) SetFlag(kTraceTurboJson);
if (FLAG_trace_turbo_graph) SetFlag(kTraceTurboGraph);
if (FLAG_trace_turbo_scheduled) SetFlag(kTraceTurboScheduled);
+ if (FLAG_trace_turbo_alloc) SetFlag(kTraceTurboAllocation);
+ if (FLAG_trace_heap_broker) SetFlag(kTraceHeapBroker);
}
OptimizedCompilationInfo::InlinedFunctionHolder::InlinedFunctionHolder(
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index eca3a8fa32..624517283e 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -9,6 +9,7 @@
#include "src/codegen/bailout-reason.h"
#include "src/codegen/source-position-table.h"
+#include "src/codegen/tick-counter.h"
#include "src/common/globals.h"
#include "src/execution/frames.h"
#include "src/handles/handles.h"
@@ -60,9 +61,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
kTraceTurboJson = 1 << 14,
kTraceTurboGraph = 1 << 15,
kTraceTurboScheduled = 1 << 16,
- kWasmRuntimeExceptionSupport = 1 << 17,
- kTurboControlFlowAwareAllocation = 1 << 18,
- kTurboPreprocessRanges = 1 << 19
+ kTraceTurboAllocation = 1 << 17,
+ kTraceHeapBroker = 1 << 18,
+ kWasmRuntimeExceptionSupport = 1 << 19,
+ kTurboControlFlowAwareAllocation = 1 << 20,
+ kTurboPreprocessRanges = 1 << 21
};
// Construct a compilation info for optimized compilation.
@@ -189,10 +192,16 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool trace_turbo_graph_enabled() const { return GetFlag(kTraceTurboGraph); }
+ bool trace_turbo_allocation_enabled() const {
+ return GetFlag(kTraceTurboAllocation);
+ }
+
bool trace_turbo_scheduled_enabled() const {
return GetFlag(kTraceTurboScheduled);
}
+ bool trace_heap_broker_enabled() const { return GetFlag(kTraceHeapBroker); }
+
// Code getters and setters.
void SetCode(Handle<Code> code) { code_ = code; }
@@ -204,7 +213,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
Context context() const;
bool has_native_context() const;
- Context native_context() const;
+ NativeContext native_context() const;
bool has_global_object() const;
JSGlobalObject global_object() const;
@@ -281,6 +290,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
std::unique_ptr<v8::tracing::TracedValue> ToTracedValue();
+ TickCounter& tick_counter() { return tick_counter_; }
+
private:
OptimizedCompilationInfo(Code::Kind code_kind, Zone* zone);
void ConfigureFlags();
@@ -333,6 +344,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
Vector<const char> debug_name_;
std::unique_ptr<char[]> trace_turbo_filename_;
+ TickCounter tick_counter_;
+
DISALLOW_COPY_AND_ASSIGN(OptimizedCompilationInfo);
};
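The new tick_counter() member threads a counter through the compilation info; together with the turbofan_ticks sample recorded in compiler.cc above (CurrentTicks() / 1000), the bookkeeping amounts to something like the following standalone sketch. ToyTickCounter is a stand-in; TickCounter's real interface may differ.

#include <cstdint>
#include <iostream>

// Minimal stand-in for a tick counter owned by the compilation info.
class ToyTickCounter {
 public:
  void DoTick() { ++ticks_; }
  int64_t CurrentTicks() const { return ticks_; }
 private:
  int64_t ticks_ = 0;
};

int main() {
  ToyTickCounter counter;
  for (int i = 0; i < 2500; ++i) counter.DoTick();  // compiler passes tick here
  // The sample is recorded in units of 1000 ticks, as in RecordCompilationStats().
  int sample = static_cast<int>(counter.CurrentTicks() / 1000);
  std::cout << sample << "\n";  // prints 2
}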
diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc
new file mode 100644
index 0000000000..9e33de7918
--- /dev/null
+++ b/deps/v8/src/codegen/pending-optimization-table.cc
@@ -0,0 +1,97 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/pending-optimization-table.h"
+
+#include "src/execution/isolate-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/js-objects.h"
+
+namespace v8 {
+namespace internal {
+
+enum class FunctionStatus { kPrepareForOptimize, kMarkForOptimize };
+
+void PendingOptimizationTable::PreparedForOptimization(
+ Isolate* isolate, Handle<JSFunction> function) {
+ DCHECK(FLAG_testing_d8_test_runner);
+
+ Handle<ObjectHashTable> table =
+ isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()
+ ? ObjectHashTable::New(isolate, 1)
+ : handle(ObjectHashTable::cast(
+ isolate->heap()->pending_optimize_for_test_bytecode()),
+ isolate);
+ Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
+ handle(function->shared().GetBytecodeArray(), isolate),
+ handle(
+ Smi::FromInt(static_cast<int>(FunctionStatus::kPrepareForOptimize)),
+ isolate),
+ AllocationType::kYoung);
+ table =
+ ObjectHashTable::Put(table, handle(function->shared(), isolate), tuple);
+ isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+}
+
+void PendingOptimizationTable::MarkedForOptimization(
+ Isolate* isolate, Handle<JSFunction> function) {
+ DCHECK(FLAG_testing_d8_test_runner);
+
+ Handle<Object> table =
+ handle(isolate->heap()->pending_optimize_for_test_bytecode(), isolate);
+ Handle<Object> entry =
+ table->IsUndefined()
+ ? handle(ReadOnlyRoots(isolate).the_hole_value(), isolate)
+ : handle(Handle<ObjectHashTable>::cast(table)->Lookup(
+ handle(function->shared(), isolate)),
+ isolate);
+ if (entry->IsTheHole()) {
+ PrintF("Error: Function ");
+ function->ShortPrint();
+ PrintF(
+ " should be prepared for optimization with "
+ "%%PrepareFunctionForOptimize before "
+ "%%OptimizeFunctionOnNextCall / %%OptimizeOSR ");
+ UNREACHABLE();
+ }
+
+ DCHECK(entry->IsTuple2());
+ Handle<Tuple2>::cast(entry)->set_value2(
+ Smi::FromInt(static_cast<int>(FunctionStatus::kMarkForOptimize)));
+ table = ObjectHashTable::Put(Handle<ObjectHashTable>::cast(table),
+ handle(function->shared(), isolate), entry);
+ isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+}
+
+void PendingOptimizationTable::FunctionWasOptimized(
+ Isolate* isolate, Handle<JSFunction> function) {
+ DCHECK(FLAG_testing_d8_test_runner);
+
+ if (isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) {
+ return;
+ }
+
+ Handle<ObjectHashTable> table =
+ handle(ObjectHashTable::cast(
+ isolate->heap()->pending_optimize_for_test_bytecode()),
+ isolate);
+ Handle<Object> value(table->Lookup(handle(function->shared(), isolate)),
+ isolate);
+ // Remove the entry only if we have already seen %OptimizeFunctionOnNextCall.
+ // If the function was optimized for other reasons, keep holding the bytecode
+ // since we may still optimize it later.
+ if (!value->IsTheHole() &&
+ Smi::cast(Handle<Tuple2>::cast(value)->value2()).value() ==
+ static_cast<int>(FunctionStatus::kMarkForOptimize)) {
+ bool was_present;
+ table = table->Remove(isolate, table, handle(function->shared(), isolate),
+ &was_present);
+ DCHECK(was_present);
+ isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/codegen/pending-optimization-table.h b/deps/v8/src/codegen/pending-optimization-table.h
new file mode 100644
index 0000000000..2a2782d17a
--- /dev/null
+++ b/deps/v8/src/codegen/pending-optimization-table.h
@@ -0,0 +1,44 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_
+#define V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// This class adds the functionality needed to properly test optimized code.
+// It is only for use in tests. All these functions should only be called when
+// the testing_d8_test_runner flag is set.
+class PendingOptimizationTable {
+ public:
+ // This function should be called before we mark the function for
+ // optimization. Calling this function ensures that |function| is compiled and
+ // has a feedback vector allocated. This also holds on to the bytecode
+ // strongly in the pending optimization table, preventing the bytecode from
+ // being flushed.
+ static void PreparedForOptimization(Isolate* isolate,
+ Handle<JSFunction> function);
+
+ // This function should be called when the function is marked for optimization
+ // via the intrinsics. This will update the state of the bytecode array in the
+ // pending optimization table, so that the entry can be removed once the
+ // function is optimized. If the function is already optimized it removes the
+ // entry from the table.
+ static void MarkedForOptimization(Isolate* isolate,
+ Handle<JSFunction> function);
+
+ // This function should be called once the function is optimized. If there is
+ // an entry in the pending optimization table and it is marked for removal
+ // then this function removes the entry from the pending optimization table.
+ static void FunctionWasOptimized(Isolate* isolate,
+ Handle<JSFunction> function);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_
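For orientation, here is a minimal sketch (not part of the patch) of the life cycle the three static methods above are meant to implement; the driver function is hypothetical and assumes --testing-d8-test-runner is enabled and the usual v8::internal headers are available.

#include "src/codegen/pending-optimization-table.h"

namespace v8 {
namespace internal {

// Hypothetical test-runner driver, shown only to illustrate the intended
// calling sequence of the API declared above.
void ExampleTestDriver(Isolate* isolate, Handle<JSFunction> function) {
  // %PrepareFunctionForOptimization: pin the bytecode in the table.
  PendingOptimizationTable::PreparedForOptimization(isolate, function);
  // %OptimizeFunctionOnNextCall: flip the entry to kMarkForOptimize.
  PendingOptimizationTable::MarkedForOptimization(isolate, function);
  // Once the optimizing compiler finishes, the entry is dropped again.
  PendingOptimizationTable::FunctionWasOptimized(isolate, function);
}

}  // namespace internal
}  // namespace v8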
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 3241f821f9..2a638af070 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -224,6 +224,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
+ scratch_register_list_(ip.bit()),
constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
@@ -1490,8 +1491,7 @@ void Assembler::mtfprwa(DoubleRegister dst, Register src) {
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h .
-void Assembler::stop(const char* msg, Condition cond, int32_t code,
- CRegister cr) {
+void Assembler::stop(Condition cond, int32_t code, CRegister cr) {
if (cond != al) {
Label skip;
b(NegateCondition(cond), &skip, cr);
@@ -1948,6 +1948,24 @@ PatchingAssembler::~PatchingAssembler() {
DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+ : assembler_(assembler),
+ old_available_(*assembler->GetScratchRegisterList()) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ *assembler_->GetScratchRegisterList() = old_available_;
+}
+
+Register UseScratchRegisterScope::Acquire() {
+ RegList* available = assembler_->GetScratchRegisterList();
+ DCHECK_NOT_NULL(available);
+ DCHECK_NE(*available, 0);
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
+ Register reg = Register::from_code(index);
+ *available &= ~reg.bit();
+ return reg;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index 2c4225849f..dee264a75c 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -437,6 +437,7 @@ class Assembler : public AssemblerBase {
PPC_XX3_OPCODE_LIST(DECLARE_PPC_XX3_INSTRUCTIONS)
#undef DECLARE_PPC_XX3_INSTRUCTIONS
+ RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
// Code generation
@@ -841,8 +842,8 @@ class Assembler : public AssemblerBase {
void function_descriptor();
// Exception-generating instructions and debugging support
- void stop(const char* msg, Condition cond = al,
- int32_t code = kDefaultStopCode, CRegister cr = cr7);
+ void stop(Condition cond = al, int32_t code = kDefaultStopCode,
+ CRegister cr = cr7);
void bkpt(uint32_t imm16); // v5 and above
@@ -1182,6 +1183,9 @@ class Assembler : public AssemblerBase {
static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
std::vector<DeferredRelocInfo> relocations_;
+ // Scratch registers available for use by the Assembler.
+ RegList scratch_register_list_;
+
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// Optimizable cmpi information.
@@ -1297,6 +1301,7 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
+ friend class UseScratchRegisterScope;
};
class EnsureSpace {
@@ -1311,6 +1316,24 @@ class PatchingAssembler : public Assembler {
~PatchingAssembler();
};
+class V8_EXPORT_PRIVATE UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(Assembler* assembler);
+ ~UseScratchRegisterScope();
+
+ Register Acquire();
+
+ // Check if we have registers available to acquire.
+ bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
+
+ private:
+ friend class Assembler;
+ friend class TurboAssembler;
+
+ Assembler* assembler_;
+ RegList old_available_;
+};
+
} // namespace internal
} // namespace v8
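As a point of reference (not part of the patch), the scope added above is meant to be used RAII-style inside the assembler; a minimal sketch with a hypothetical emitter function:

// Hypothetical helper illustrating the intended usage of UseScratchRegisterScope.
void EmitWithTemp(Assembler* assm) {
  UseScratchRegisterScope temps(assm);
  if (temps.CanAcquire()) {
    Register scratch = temps.Acquire();  // removes ip from the scratch list
    USE(scratch);
    // ... emit instructions that clobber |scratch| ...
  }
  // The destructor restores the previous scratch register list, so callers
  // and nested scopes see the register as available again.
}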
diff --git a/deps/v8/src/codegen/ppc/code-stubs-ppc.cc b/deps/v8/src/codegen/ppc/code-stubs-ppc.cc
deleted file mode 100644
index 937c745662..0000000000
--- a/deps/v8/src/codegen/ppc/code-stubs-ppc.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/api/api-arguments-inl.h"
-#include "src/base/bits.h"
-#include "src/code-stubs.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/execution/frame-constants.h"
-#include "src/execution/frames.h"
-#include "src/execution/isolate.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/init/bootstrapper.h"
-#include "src/numbers/double.h"
-#include "src/objects/api-callbacks.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 62f0fde3b8..8ab3e5b83b 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -419,7 +419,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
andi(r0, dst, Operand(kPointerSize - 1));
beq(&ok, cr0);
- stop("Unaligned cell in write barrier");
+ stop();
bind(&ok);
}
@@ -1721,15 +1721,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
- const char* msg = GetAbortReason(reason);
#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
RecordComment(msg);
#endif
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
- stop(msg);
+ stop();
return;
}
@@ -2454,27 +2454,24 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
DCHECK_EQ(mem.rb(), no_reg);
int offset = mem.offset();
+ int misaligned = (offset & 3);
+ int adj = (offset & 3) - 4;
+ int alignedOffset = (offset & ~3) + 4;
- if (!is_int16(offset)) {
+ if (!is_int16(offset) || (misaligned && !is_int16(alignedOffset))) {
/* cannot use d-form */
- DCHECK_NE(scratch, no_reg);
mov(scratch, Operand(offset));
LoadPX(dst, MemOperand(mem.ra(), scratch));
} else {
-#if V8_TARGET_ARCH_PPC64
- int misaligned = (offset & 3);
if (misaligned) {
// adjust base to conform to offset alignment requirements
// Todo: enhance to use scratch if dst is unsuitable
- DCHECK(dst != r0);
- addi(dst, mem.ra(), Operand((offset & 3) - 4));
- ld(dst, MemOperand(dst, (offset & ~3) + 4));
+ DCHECK_NE(dst, r0);
+ addi(dst, mem.ra(), Operand(adj));
+ ld(dst, MemOperand(dst, alignedOffset));
} else {
ld(dst, mem);
}
-#else
- lwz(dst, mem);
-#endif
}
}
@@ -2934,20 +2931,24 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
blt(dest);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
- ShiftRightArithImm(builtin_pointer, builtin_pointer,
+ ShiftRightArithImm(builtin_index, builtin_index,
kSmiShift - kSystemPointerSizeLog2);
- addi(builtin_pointer, builtin_pointer,
+ addi(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
- LoadPX(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
- Call(builtin_pointer);
+ LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index ae24ef9a55..6249c405e3 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -408,11 +408,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Condition cond = al);
void Call(Label* target);
+ // Load the entry address of the builtin given by the Smi index in
+ // |builtin_index| into the same register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
- void CallBuiltinPointer(Register builtin_pointer) override;
+ void CallBuiltinByIndex(Register builtin_index) override;
void CallForDeoptimization(Address target, int deopt_id);
// Emit code to discard a non-negative number of pointer-sized elements
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index dbfdc9a32a..6776626a23 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -351,7 +351,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
- : AssemblerBase(options, std::move(buffer)) {
+ : AssemblerBase(options, std::move(buffer)),
+ scratch_register_list_(ip.bit()) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
last_bound_pos_ = 0;
relocations_.reserve(128);
@@ -636,8 +637,7 @@ void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) {
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-s390.h .
-void Assembler::stop(const char* msg, Condition cond, int32_t code,
- CRegister cr) {
+void Assembler::stop(Condition cond, int32_t code, CRegister cr) {
if (cond != al) {
Label skip;
b(NegateCondition(cond), &skip, Label::kNear);
@@ -831,6 +831,23 @@ void Assembler::EmitRelocations() {
}
}
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+ : assembler_(assembler),
+ old_available_(*assembler->GetScratchRegisterList()) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ *assembler_->GetScratchRegisterList() = old_available_;
+}
+
+Register UseScratchRegisterScope::Acquire() {
+ RegList* available = assembler_->GetScratchRegisterList();
+ DCHECK_NOT_NULL(available);
+ DCHECK_NE(*available, 0);
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
+ Register reg = Register::from_code(index);
+ *available &= ~reg.bit();
+ return reg;
+}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index e22c037a31..0653e79b67 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -307,7 +307,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// in the code, so the serializer should not step forwards in memory after
// a target is resolved and written.
static constexpr int kSpecialTargetSize = 0;
-
// Number of bytes for instructions used to store pointer sized constant.
#if V8_TARGET_ARCH_S390X
static constexpr int kBytesForPtrConstant = 12; // IIHF + IILF
@@ -315,6 +314,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kBytesForPtrConstant = 6; // IILF
#endif
+ RegList* GetScratchRegisterList() { return &scratch_register_list_; }
+
// ---------------------------------------------------------------------------
// Code generation
@@ -1261,8 +1262,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void larl(Register r, Label* l);
// Exception-generating instructions and debugging support
- void stop(const char* msg, Condition cond = al,
- int32_t code = kDefaultStopCode, CRegister cr = cr7);
+ void stop(Condition cond = al, int32_t code = kDefaultStopCode,
+ CRegister cr = cr7);
void bkpt(uint32_t imm16); // v5 and above
@@ -1376,6 +1377,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
RelocInfoWriter reloc_info_writer;
std::vector<DeferredRelocInfo> relocations_;
+ // Scratch registers available for use by the Assembler.
+ RegList scratch_register_list_;
+
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
@@ -1455,6 +1459,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
friend class RegExpMacroAssemblerS390;
friend class RelocInfo;
friend class EnsureSpace;
+ friend class UseScratchRegisterScope;
};
class EnsureSpace {
@@ -1462,6 +1467,24 @@ class EnsureSpace {
explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
};
+class V8_EXPORT_PRIVATE UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(Assembler* assembler);
+ ~UseScratchRegisterScope();
+
+ Register Acquire();
+
+ // Check if we have registers available to acquire.
+ bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
+
+ private:
+ friend class Assembler;
+ friend class TurboAssembler;
+
+ Assembler* assembler_;
+ RegList old_available_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/s390/code-stubs-s390.cc b/deps/v8/src/codegen/s390/code-stubs-s390.cc
deleted file mode 100644
index f85c309943..0000000000
--- a/deps/v8/src/codegen/s390/code-stubs-s390.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/api/api-arguments-inl.h"
-#include "src/base/bits.h"
-#include "src/code-stubs.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/execution/frame-constants.h"
-#include "src/execution/frames.h"
-#include "src/execution/isolate.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/init/bootstrapper.h"
-#include "src/objects/api-callbacks.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index ff94fa839e..f6c2314a84 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -440,7 +440,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
AndP(r0, dst, Operand(kPointerSize - 1));
beq(&ok, Label::kNear);
- stop("Unaligned cell in write barrier");
+ stop();
bind(&ok);
}
@@ -1670,15 +1670,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
- const char* msg = GetAbortReason(reason);
#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
RecordComment(msg);
#endif
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
- stop(msg);
+ stop();
return;
}
@@ -4332,20 +4332,24 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
blt(dest);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
- ShiftRightArithP(builtin_pointer, builtin_pointer,
+ ShiftRightArithP(builtin_index, builtin_index,
Operand(kSmiShift - kSystemPointerSizeLog2));
- AddP(builtin_pointer, builtin_pointer,
+ AddP(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
- LoadP(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
- Call(builtin_pointer);
+ LoadP(builtin_index, MemOperand(kRootRegister, builtin_index));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index ba870874c8..52f668d175 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -166,11 +166,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Label* target);
+ // Load the entry address of the builtin given by the Smi index in
+ // |builtin_index| into the same register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
- void CallBuiltinPointer(Register builtin_pointer) override;
+ void CallBuiltinByIndex(Register builtin_index) override;
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 066f0123fc..fccce1a7a6 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -5,8 +5,8 @@
#ifndef V8_CODEGEN_SAFEPOINT_TABLE_H_
#define V8_CODEGEN_SAFEPOINT_TABLE_H_
+#include "src/base/memory.h"
#include "src/common/assert-scope.h"
-#include "src/common/v8memory.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "src/zone/zone-chunk-list.h"
@@ -76,22 +76,23 @@ class SafepointTable {
unsigned GetPcOffset(unsigned index) const {
DCHECK(index < length_);
- return Memory<uint32_t>(GetPcOffsetLocation(index));
+ return base::Memory<uint32_t>(GetPcOffsetLocation(index));
}
int GetTrampolinePcOffset(unsigned index) const {
DCHECK(index < length_);
- return Memory<int>(GetTrampolineLocation(index));
+ return base::Memory<int>(GetTrampolineLocation(index));
}
unsigned find_return_pc(unsigned pc_offset);
SafepointEntry GetEntry(unsigned index) const {
DCHECK(index < length_);
- unsigned deopt_index = Memory<uint32_t>(GetEncodedInfoLocation(index));
- uint8_t* bits = &Memory<uint8_t>(entries_ + (index * entry_size_));
+ unsigned deopt_index =
+ base::Memory<uint32_t>(GetEncodedInfoLocation(index));
+ uint8_t* bits = &base::Memory<uint8_t>(entries_ + (index * entry_size_));
int trampoline_pc =
- has_deopt_ ? Memory<int>(GetTrampolineLocation(index)) : -1;
+ has_deopt_ ? base::Memory<int>(GetTrampolineLocation(index)) : -1;
return SafepointEntry(deopt_index, bits, trampoline_pc);
}
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index 6c0aa36b27..e10cc07571 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -31,7 +31,7 @@ class MoreBit : public BitField8<bool, 7, 1> {};
class ValueBits : public BitField8<unsigned, 0, 7> {};
// Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
-void AddAndSetEntry(PositionTableEntry& value,
+void AddAndSetEntry(PositionTableEntry& value, // NOLINT(runtime/references)
const PositionTableEntry& other) {
value.code_offset += other.code_offset;
value.source_position += other.source_position;
@@ -39,7 +39,7 @@ void AddAndSetEntry(PositionTableEntry& value,
}
// Helper: Subtract the offsets from 'other' from 'value'.
-void SubtractFromEntry(PositionTableEntry& value,
+void SubtractFromEntry(PositionTableEntry& value, // NOLINT(runtime/references)
const PositionTableEntry& other) {
value.code_offset -= other.code_offset;
value.source_position -= other.source_position;
@@ -47,7 +47,8 @@ void SubtractFromEntry(PositionTableEntry& value,
// Helper: Encode an integer.
template <typename T>
-void EncodeInt(std::vector<byte>& bytes, T value) {
+void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references)
+ T value) {
using unsigned_type = typename std::make_unsigned<T>::type;
// Zig-zag encoding.
static const int kShift = sizeof(T) * kBitsPerByte - 1;
@@ -65,7 +66,8 @@ void EncodeInt(std::vector<byte>& bytes, T value) {
}
// Encode a PositionTableEntry.
-void EncodeEntry(std::vector<byte>& bytes, const PositionTableEntry& entry) {
+void EncodeEntry(std::vector<byte>& bytes, // NOLINT(runtime/references)
+ const PositionTableEntry& entry) {
// We only accept ascending code offsets.
DCHECK_GE(entry.code_offset, 0);
// Since code_offset is not negative, we use sign to encode is_statement.
@@ -113,8 +115,9 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
}
#ifdef ENABLE_SLOW_DCHECKS
-void CheckTableEquals(std::vector<PositionTableEntry>& raw_entries,
- SourcePositionTableIterator& encoded) {
+void CheckTableEquals(
+ std::vector<PositionTableEntry>& raw_entries, // NOLINT(runtime/references)
+ SourcePositionTableIterator& encoded) { // NOLINT(runtime/references)
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
auto raw = raw_entries.begin();
diff --git a/deps/v8/src/codegen/tick-counter.cc b/deps/v8/src/codegen/tick-counter.cc
new file mode 100644
index 0000000000..2e72ae0e86
--- /dev/null
+++ b/deps/v8/src/codegen/tick-counter.cc
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/tick-counter.h"
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+void TickCounter::DoTick() {
+ ++ticks_;
+ // Magical number to detect performance bugs or compiler divergence.
+ // Chosen to be roughly 10x what is typically needed.
+ constexpr size_t kMaxTicks = 100000000;
+ USE(kMaxTicks);
+ DCHECK_LT(ticks_, kMaxTicks);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/codegen/tick-counter.h b/deps/v8/src/codegen/tick-counter.h
new file mode 100644
index 0000000000..8d6c966bb0
--- /dev/null
+++ b/deps/v8/src/codegen/tick-counter.h
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_TICK_COUNTER_H_
+#define V8_CODEGEN_TICK_COUNTER_H_
+
+#include <cstddef>
+
+namespace v8 {
+namespace internal {
+
+// A deterministic correlate of time, used to detect performance or
+// divergence bugs in Turbofan. DoTick() should be called frequently
+// throughout the compilation.
+class TickCounter {
+ public:
+ void DoTick();
+ size_t CurrentTicks() const { return ticks_; }
+
+ private:
+ size_t ticks_ = 0;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_TICK_COUNTER_H_
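For context (not part of the patch), a compiler phase is expected to tick the counter once per unit of work; a minimal sketch with a hypothetical phase loop:

#include "src/codegen/tick-counter.h"

namespace v8 {
namespace internal {

// Hypothetical phase body: ticking once per processed node gives a
// deterministic progress measure and trips the DCHECK in DoTick() if a
// compilation runs away.
void RunHypotheticalPhase(TickCounter& tick_counter, int node_count) {
  for (int i = 0; i < node_count; ++i) {
    tick_counter.DoTick();
    // ... process node i ...
  }
}

}  // namespace internal
}  // namespace v8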
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index afdef22fe7..2f058eda19 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -50,9 +50,9 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
void set_has_frame(bool v) { has_frame_ = v; }
bool has_frame() const { return has_frame_; }
- // Calls the given builtin. If builtins are embedded, the trampoline Code
- // object on the heap is not used.
- virtual void CallBuiltinPointer(Register builtin_pointer) = 0;
+ // Calls the builtin given by the Smi in |builtin_index|. If builtins are
+ // embedded, the trampoline Code object on the heap is not used.
+ virtual void CallBuiltinByIndex(Register builtin_index) = 0;
// Calls/jumps to the given Code object. If builtins are embedded, the
// trampoline Code object on the heap is not used.
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 67cf648c04..f5d0c0ffcf 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -8,7 +8,7 @@
#include "src/codegen/x64/assembler-x64.h"
#include "src/base/cpu.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"
@@ -246,7 +246,7 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
}
Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(Address pc) {
- return GetCompressedEmbeddedObject(ReadUnalignedValue<int32_t>(pc));
+ return GetEmbeddedObject(ReadUnalignedValue<uint32_t>(pc));
}
Address Assembler::runtime_entry_at(Address pc) {
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 3236b0f52c..1d28f1d45d 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -78,6 +78,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
+ if (cpu.has_sse42() && FLAG_enable_sse4_2) supported_ |= 1u << SSE4_2;
if (cpu.has_sse41() && FLAG_enable_sse4_1) {
supported_ |= 1u << SSE4_1;
supported_ |= 1u << SSSE3;
@@ -1257,6 +1258,13 @@ void Assembler::emit_cmpxchg(Operand dst, Register src, int size) {
emit_operand(src, dst);
}
+void Assembler::mfence() {
+ EnsureSpace ensure_space(this);
+ emit(0x0F);
+ emit(0xAE);
+ emit(0xF0);
+}
+
void Assembler::lfence() {
EnsureSpace ensure_space(this);
emit(0x0F);
@@ -1512,19 +1520,20 @@ void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
emitl(code_target_index);
}
-void Assembler::jmp_rel(int offset) {
+void Assembler::jmp_rel(int32_t offset) {
EnsureSpace ensure_space(this);
- const int short_size = sizeof(int8_t);
- const int long_size = sizeof(int32_t);
- --offset; // This is how jumps are specified on x64.
- if (is_int8(offset - short_size) && !predictable_code_size()) {
- // 1110 1011 #8-bit disp.
+ // The offset is encoded relative to the next instruction.
+ constexpr int32_t kShortJmpDisplacement = 1 + sizeof(int8_t);
+ constexpr int32_t kNearJmpDisplacement = 1 + sizeof(int32_t);
+ DCHECK_LE(std::numeric_limits<int32_t>::min() + kNearJmpDisplacement, offset);
+ if (is_int8(offset - kShortJmpDisplacement) && !predictable_code_size()) {
+ // 0xEB #8-bit disp.
emit(0xEB);
- emit((offset - short_size) & 0xFF);
+ emit(offset - kShortJmpDisplacement);
} else {
- // 1110 1001 #32-bit disp.
+ // 0xE9 #32-bit disp.
emit(0xE9);
- emitl(offset - long_size);
+ emitl(offset - kNearJmpDisplacement);
}
}
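A worked example of the new encoding (illustrative only, not part of the patch):

// jmp_rel(2): 2 - kShortJmpDisplacement = 0 fits in int8, so the assembler
// emits EB 00 -- a two-byte short jump whose target is the instruction that
// immediately follows it. The |offset| argument is measured from the start of
// the jump instruction itself, which is why the instruction length is
// subtracted before the displacement is emitted.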
@@ -2005,84 +2014,37 @@ void Assembler::emit_not(Operand dst, int size) {
}
void Assembler::Nop(int n) {
+ DCHECK_LE(0, n);
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
//
- // Length Assembly Byte Sequence
- // 2 bytes 66 NOP 66 90H
- // 3 bytes NOP DWORD ptr [EAX] 0F 1F 00H
- // 4 bytes NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H
- // 5 bytes NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H
- // 6 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H
- // 7 bytes NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H
- // 8 bytes NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
- // 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
- // 00000000H] 00H
-
- EnsureSpace ensure_space(this);
- while (n > 0) {
- switch (n) {
- case 2:
- emit(0x66);
- V8_FALLTHROUGH;
- case 1:
- emit(0x90);
- return;
- case 3:
- emit(0x0F);
- emit(0x1F);
- emit(0x00);
- return;
- case 4:
- emit(0x0F);
- emit(0x1F);
- emit(0x40);
- emit(0x00);
- return;
- case 6:
- emit(0x66);
- V8_FALLTHROUGH;
- case 5:
- emit(0x0F);
- emit(0x1F);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 7:
- emit(0x0F);
- emit(0x1F);
- emit(0x80);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- default:
- case 11:
- emit(0x66);
- n--;
- V8_FALLTHROUGH;
- case 10:
- emit(0x66);
- n--;
- V8_FALLTHROUGH;
- case 9:
- emit(0x66);
- n--;
- V8_FALLTHROUGH;
- case 8:
- emit(0x0F);
- emit(0x1F);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- n -= 8;
- }
- }
+ // Len Assembly Byte Sequence
+ // 2 66 NOP 66 90H
+ // 3 NOP DWORD ptr [EAX] 0F 1F 00H
+ // 4 NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H
+ // 5 NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H
+ // 6 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H
+ // 7 NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H
+ // 8 NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
+ // 9 66 NOP DWORD ptr [EAX + EAX*1 + 00000000H] 66 0F 1F 84 00 00 00 00 00H
+
+ constexpr const char* kNopSequences =
+ "\x66\x90" // length 1 (@1) / 2 (@0)
+ "\x0F\x1F\x00" // length 3 (@2)
+ "\x0F\x1F\x40\x00" // length 4 (@5)
+ "\x66\x0F\x1F\x44\x00\x00" // length 5 (@10) / 6 (@9)
+ "\x0F\x1F\x80\x00\x00\x00\x00" // length 7 (@15)
+ "\x66\x0F\x1F\x84\x00\x00\x00\x00\x00"; // length 8 (@23) / 9 (@22)
+ constexpr int8_t kNopOffsets[10] = {0, 1, 0, 2, 5, 10, 9, 15, 23, 22};
+
+ do {
+ EnsureSpace ensure_space(this);
+ int nop_bytes = std::min(n, 9);
+ const char* sequence = kNopSequences + kNopOffsets[nop_bytes];
+ memcpy(pc_, sequence, nop_bytes);
+ pc_ += nop_bytes;
+ n -= nop_bytes;
+ } while (n);
}
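A worked example of how the loop above decomposes a request (illustrative only):

// Nop(13):
//   pass 1: nop_bytes = std::min(13, 9) = 9, kNopOffsets[9] = 22, so the
//           9-byte sequence 66 0F 1F 84 00 00 00 00 00 is copied; n -> 4.
//   pass 2: nop_bytes = 4, kNopOffsets[4] = 5, so the 4-byte sequence
//           0F 1F 40 00 is copied; n -> 0 and the loop terminates.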
void Assembler::popq(Register dst) {
@@ -2883,6 +2845,18 @@ void Assembler::movd(Register dst, XMMRegister src) {
}
void Assembler::movq(XMMRegister dst, Register src) {
+ // Mixing AVX and non-AVX is expensive, catch those cases
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x6E);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movq(XMMRegister dst, Operand src) {
+ // Mixing AVX and non-AVX is expensive, catch those cases
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2893,6 +2867,7 @@ void Assembler::movq(XMMRegister dst, Register src) {
}
void Assembler::movq(Register dst, XMMRegister src) {
+ // Mixing AVX and non-AVX is expensive, catch those cases
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2903,6 +2878,7 @@ void Assembler::movq(Register dst, XMMRegister src) {
}
void Assembler::movq(XMMRegister dst, XMMRegister src) {
+ // Mixing AVX and non-AVX is expensive, catch those cases
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
if (dst.low_bits() == 4) {
@@ -3068,6 +3044,42 @@ void Assembler::pextrd(Operand dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
+void Assembler::pextrq(Register dst, XMMRegister src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x16);
+ emit_sse_operand(src, dst);
+ emit(imm8);
+}
+
+void Assembler::pinsrq(XMMRegister dst, Register src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x22);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+void Assembler::pinsrq(XMMRegister dst, Operand src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x22);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -4135,6 +4147,22 @@ void Assembler::vmovq(Register dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
+void Assembler::vmovdqu(XMMRegister dst, Operand src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, xmm0, src, kL128, kF3, k0F, kWIG);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::vmovdqu(Operand src, XMMRegister dst) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, xmm0, src, kL128, kF3, k0F, kWIG);
+ emit(0x7F);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2, SIMDPrefix pp, LeadingOpcode m,
VexW w) {
@@ -4654,6 +4682,30 @@ void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
emit_sse_operand(dst, src);
}
+void Assembler::sse4_2_instr(XMMRegister dst, XMMRegister src, byte prefix,
+ byte escape1, byte escape2, byte opcode) {
+ DCHECK(IsEnabled(SSE4_2));
+ EnsureSpace ensure_space(this);
+ emit(prefix);
+ emit_optional_rex_32(dst, src);
+ emit(escape1);
+ emit(escape2);
+ emit(opcode);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::sse4_2_instr(XMMRegister dst, Operand src, byte prefix,
+ byte escape1, byte escape2, byte opcode) {
+ DCHECK(IsEnabled(SSE4_2));
+ EnsureSpace ensure_space(this);
+ emit(prefix);
+ emit_optional_rex_32(dst, src);
+ emit(escape1);
+ emit(escape2);
+ emit(opcode);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::lddqu(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index dc6acb67f4..acb4fce82c 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -952,6 +952,23 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
#undef DECLARE_SSE4_INSTRUCTION
+ // SSE4.2
+ void sse4_2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
+ void sse4_2_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
+#define DECLARE_SSE4_2_INSTRUCTION(instruction, prefix, escape1, escape2, \
+ opcode) \
+ void instruction(XMMRegister dst, XMMRegister src) { \
+ sse4_2_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+ } \
+ void instruction(XMMRegister dst, Operand src) { \
+ sse4_2_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+ }
+
+ SSE4_2_INSTRUCTION_LIST(DECLARE_SSE4_2_INSTRUCTION)
+#undef DECLARE_SSE4_2_INSTRUCTION
+
#define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \
opcode) \
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
@@ -969,6 +986,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movd(XMMRegister dst, Operand src);
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
+ void movq(XMMRegister dst, Operand src);
void movq(Register dst, XMMRegister src);
void movq(XMMRegister dst, XMMRegister src);
@@ -1068,12 +1086,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pextrw(Operand dst, XMMRegister src, int8_t imm8);
void pextrd(Register dst, XMMRegister src, int8_t imm8);
void pextrd(Operand dst, XMMRegister src, int8_t imm8);
+ void pextrq(Register dst, XMMRegister src, int8_t imm8);
void pinsrb(XMMRegister dst, Register src, int8_t imm8);
void pinsrb(XMMRegister dst, Operand src, int8_t imm8);
void pinsrw(XMMRegister dst, Register src, int8_t imm8);
void pinsrw(XMMRegister dst, Operand src, int8_t imm8);
void pinsrd(XMMRegister dst, Register src, int8_t imm8);
void pinsrd(XMMRegister dst, Operand src, int8_t imm8);
+ void pinsrq(XMMRegister dst, Register src, int8_t imm8);
+ void pinsrq(XMMRegister dst, Operand src, int8_t imm8);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
@@ -1284,6 +1305,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); }
void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
+ void vmovdqu(XMMRegister dst, Operand src);
+ void vmovdqu(Operand dst, XMMRegister src);
#define AVX_SP_3(instr, opcode) \
AVX_S_3(instr, opcode) \
@@ -1723,6 +1746,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void rorxl(Register dst, Register src, byte imm8);
void rorxl(Register dst, Operand src, byte imm8);
+ void mfence();
void lfence();
void pause();
diff --git a/deps/v8/src/codegen/x64/constants-x64.h b/deps/v8/src/codegen/x64/constants-x64.h
index 0e43b05034..775abecd9f 100644
--- a/deps/v8/src/codegen/x64/constants-x64.h
+++ b/deps/v8/src/codegen/x64/constants-x64.h
@@ -12,7 +12,8 @@ namespace internal {
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
// TODO(sigurds): Choose best value.
-constexpr int kRootRegisterBias = 128;
+// TODO(ishell): Choose best value for ptr-compr.
+constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 128 : 0;
constexpr size_t kMaxPCRelativeCodeRangeInMB = 2048;
} // namespace internal
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 493c711009..f13811b1ae 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -317,15 +317,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
void TurboAssembler::DecompressRegisterAnyTagged(Register destination,
Register scratch) {
- if (kUseBranchlessPtrDecompression) {
+ if (kUseBranchlessPtrDecompressionInGeneratedCode) {
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
Register masked_root = scratch;
- movl(masked_root, destination);
- andl(masked_root, Immediate(kSmiTagMask));
- negq(masked_root);
- andq(masked_root, kRootRegister);
+ xorq(masked_root, masked_root);
+ Condition smi = CheckSmi(destination);
+ cmovq(NegateCondition(smi), masked_root, kRootRegister);
// Now this add operation will either leave the value unchanged if it is
// a smi or add the isolate root if it is a heap object.
addq(destination, masked_root);
@@ -917,7 +916,7 @@ void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
orq(kScratchRegister, Immediate(1));
bind(&msb_not_set);
Cvtqsi2ss(dst, kScratchRegister);
- addss(dst, dst);
+ Addss(dst, dst);
bind(&done);
}
@@ -941,7 +940,7 @@ void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
orq(kScratchRegister, Immediate(1));
bind(&msb_not_set);
Cvtqsi2sd(dst, kScratchRegister);
- addsd(dst, dst);
+ Addsd(dst, dst);
bind(&done);
}
@@ -1042,11 +1041,11 @@ void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
// and convert it again to see if it is within the uint64 range.
if (is_double) {
tasm->Move(kScratchDoubleReg, -9223372036854775808.0);
- tasm->addsd(kScratchDoubleReg, src);
+ tasm->Addsd(kScratchDoubleReg, src);
tasm->Cvttsd2siq(dst, kScratchDoubleReg);
} else {
tasm->Move(kScratchDoubleReg, -9223372036854775808.0f);
- tasm->addss(kScratchDoubleReg, src);
+ tasm->Addss(kScratchDoubleReg, src);
tasm->Cvttss2siq(dst, kScratchDoubleReg);
}
tasm->testq(dst, dst);
@@ -1468,8 +1467,9 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object,
}
}
if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- int compressed_embedded_object_index = AddCompressedEmbeddedObject(object);
- movl(result, Immediate(compressed_embedded_object_index, rmode));
+ EmbeddedObjectIndex index = AddEmbeddedObject(object);
+ DCHECK(is_uint32(index));
+ movl(result, Immediate(static_cast<int>(index), rmode));
} else {
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
movq(result, Immediate64(object.address(), rmode));
@@ -1607,29 +1607,33 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below (we use times_4 instead
// of times_8 since smis are already shifted by one).
- Call(Operand(kRootRegister, builtin_pointer, times_4,
- IsolateData::builtin_entry_table_offset()));
+ return Operand(kRootRegister, builtin_index, times_4,
+ IsolateData::builtin_entry_table_offset());
#else // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
- SmiUntag(builtin_pointer, builtin_pointer);
- Call(Operand(kRootRegister, builtin_pointer, times_8,
- IsolateData::builtin_entry_table_offset()));
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index, builtin_index);
+ return Operand(kRootRegister, builtin_index, times_8,
+ IsolateData::builtin_entry_table_offset());
#endif // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
}
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ Call(EntryFromBuiltinIndexAsOperand(builtin_index));
+}
+
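A quick arithmetic check of the folded untagging above (illustrative only, not part of the patch):

// With pointer compression or 31-bit Smis (kSmiShiftSize == 0), builtin
// index 5 is held as the Smi value 5 << 1 = 10; scaling by times_4 yields
// 10 * 4 = 40 = 5 * kSystemPointerSize, i.e. the byte offset of entry 5 in
// the builtin entry table. In the kSmiShiftSize == 31 case, the explicit
// SmiUntag plus times_8 produces the same offset.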
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
@@ -1767,6 +1771,46 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
}
}
+void TurboAssembler::Psllq(XMMRegister dst, byte imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsllq(dst, dst, imm8);
+ } else {
+ DCHECK(!IsEnabled(AVX));
+ psllq(dst, imm8);
+ }
+}
+
+void TurboAssembler::Psrlq(XMMRegister dst, byte imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsrlq(dst, dst, imm8);
+ } else {
+ DCHECK(!IsEnabled(AVX));
+ psrlq(dst, imm8);
+ }
+}
+
+void TurboAssembler::Pslld(XMMRegister dst, byte imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpslld(dst, dst, imm8);
+ } else {
+ DCHECK(!IsEnabled(AVX));
+ pslld(dst, imm8);
+ }
+}
+
+void TurboAssembler::Psrld(XMMRegister dst, byte imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsrld(dst, dst, imm8);
+ } else {
+ DCHECK(!IsEnabled(AVX));
+ psrld(dst, imm8);
+ }
+}
+
void TurboAssembler::Lzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index a5b8e60ec5..139690bb8d 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -80,7 +80,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
template <typename Dst, typename... Args>
struct AvxHelper {
Assembler* assm;
- // Call an method where the AVX version expects the dst argument to be
+ // Call a method where the AVX version expects the dst argument to be
// duplicated.
template <void (Assembler::*avx)(Dst, Dst, Args...),
void (Assembler::*no_avx)(Dst, Args...)>
@@ -93,7 +93,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
}
- // Call an method where the AVX version expects no duplicated dst argument.
+ // Call a method where the AVX version expects no duplicated dst argument.
template <void (Assembler::*avx)(Dst, Args...),
void (Assembler::*no_avx)(Dst, Args...)>
void emit(Dst dst, Args... args) {
@@ -127,11 +127,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Movmskpd, movmskpd)
AVX_OP(Movss, movss)
AVX_OP(Movsd, movsd)
+ AVX_OP(Movdqu, movdqu)
AVX_OP(Pcmpeqd, pcmpeqd)
- AVX_OP(Pslld, pslld)
- AVX_OP(Psllq, psllq)
- AVX_OP(Psrld, psrld)
- AVX_OP(Psrlq, psrlq)
+ AVX_OP(Addss, addss)
AVX_OP(Addsd, addsd)
AVX_OP(Mulsd, mulsd)
AVX_OP(Andps, andps)
@@ -344,7 +342,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
- void CallBuiltinPointer(Register builtin_pointer) override;
+ Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
@@ -368,6 +367,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, Operand src, int8_t imm8);
+ void Psllq(XMMRegister dst, byte imm8);
+ void Psrlq(XMMRegister dst, byte imm8);
+ void Pslld(XMMRegister dst, byte imm8);
+ void Psrld(XMMRegister dst, byte imm8);
+
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Operand with, RootIndex index);
diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index ee20483cfe..56618d20e0 100644
--- a/deps/v8/src/codegen/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -21,6 +21,7 @@
V(paddb, 66, 0F, FC) \
V(paddw, 66, 0F, FD) \
V(paddd, 66, 0F, FE) \
+ V(paddq, 66, 0F, D4) \
V(paddsb, 66, 0F, EC) \
V(paddsw, 66, 0F, ED) \
V(paddusb, 66, 0F, DC) \
@@ -46,6 +47,7 @@
V(psubb, 66, 0F, F8) \
V(psubw, 66, 0F, F9) \
V(psubd, 66, 0F, FA) \
+ V(psubq, 66, 0F, FB) \
V(psubsb, 66, 0F, E8) \
V(psubsw, 66, 0F, E9) \
V(psubusb, 66, 0F, D8) \
@@ -66,6 +68,7 @@
V(psignd, 66, 0F, 38, 0A)
#define SSE4_INSTRUCTION_LIST(V) \
+ V(pcmpeqq, 66, 0F, 38, 29) \
V(ptest, 66, 0F, 38, 17) \
V(pmovsxbw, 66, 0F, 38, 20) \
V(pmovsxwd, 66, 0F, 38, 23) \
@@ -82,4 +85,6 @@
V(pmaxud, 66, 0F, 38, 3F) \
V(pmulld, 66, 0F, 38, 40)
+#define SSE4_2_INSTRUCTION_LIST(V) V(pcmpgtq, 66, 0F, 38, 37)
+
#endif // V8_CODEGEN_X64_SSE_INSTR_H_
diff --git a/deps/v8/src/common/OWNERS b/deps/v8/src/common/OWNERS
new file mode 100644
index 0000000000..3f9de7e204
--- /dev/null
+++ b/deps/v8/src/common/OWNERS
@@ -0,0 +1,3 @@
+file://COMMON_OWNERS
+
+# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 5d4b957e84..8d1bf5dfcc 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -212,15 +212,6 @@ constexpr size_t kReservedCodeRangePages = 0;
STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2));
-// This macro is used for declaring and defining HeapObject getter methods that
-// are a bit more efficient for the pointer compression case than the default
-// parameterless getters because isolate root doesn't have to be computed from
-// arbitrary field address but it comes "for free" instead.
-// These alternatives are always defined (in order to avoid #ifdef mess but
-// are not supposed to be used when pointer compression is not enabled.
-#define ROOT_VALUE isolate_for_root
-#define ROOT_PARAM Isolate* const ROOT_VALUE
-
#ifdef V8_COMPRESS_POINTERS
static_assert(
kSystemPointerSize == kInt64Size,
@@ -234,11 +225,6 @@ constexpr int kTaggedSizeLog2 = 2;
using Tagged_t = int32_t;
using AtomicTagged_t = base::Atomic32;
-#define DEFINE_ROOT_VALUE(isolate) ROOT_PARAM = isolate
-#define WITH_ROOT_PARAM(...) ROOT_PARAM, ##__VA_ARGS__
-#define WITH_ROOT_VALUE(...) ROOT_VALUE, ##__VA_ARGS__
-#define WITH_ROOT(isolate_for_root, ...) isolate_for_root, ##__VA_ARGS__
-
#else
constexpr int kTaggedSize = kSystemPointerSize;
@@ -249,16 +235,12 @@ constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
using Tagged_t = Address;
using AtomicTagged_t = base::AtomicWord;
-#define DEFINE_ROOT_VALUE(isolate)
-#define WITH_ROOT_PARAM(...) __VA_ARGS__
-#define WITH_ROOT_VALUE(...) __VA_ARGS__
-#define WITH_ROOT(isolate_for_root, ...) __VA_ARGS__
-
#endif // V8_COMPRESS_POINTERS
// Defines whether the branchless or branchful implementation of pointer
// decompression should be used.
-constexpr bool kUseBranchlessPtrDecompression = true;
+constexpr bool kUseBranchlessPtrDecompressionInRuntime = false;
+constexpr bool kUseBranchlessPtrDecompressionInGeneratedCode = false;
STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
STATIC_ASSERT((kTaggedSize == 8) == TAGGED_SIZE_8_BYTES);
@@ -667,7 +649,6 @@ struct SlotTraits;
template <>
struct SlotTraits<SlotLocation::kOffHeap> {
using TObjectSlot = FullObjectSlot;
- using TMapWordSlot = FullObjectSlot;
using TMaybeObjectSlot = FullMaybeObjectSlot;
using THeapObjectSlot = FullHeapObjectSlot;
};
@@ -678,12 +659,10 @@ template <>
struct SlotTraits<SlotLocation::kOnHeap> {
#ifdef V8_COMPRESS_POINTERS
using TObjectSlot = CompressedObjectSlot;
- using TMapWordSlot = CompressedMapWordSlot;
using TMaybeObjectSlot = CompressedMaybeObjectSlot;
using THeapObjectSlot = CompressedHeapObjectSlot;
#else
using TObjectSlot = FullObjectSlot;
- using TMapWordSlot = FullObjectSlot;
using TMaybeObjectSlot = FullMaybeObjectSlot;
using THeapObjectSlot = FullHeapObjectSlot;
#endif
@@ -693,10 +672,6 @@ struct SlotTraits<SlotLocation::kOnHeap> {
// holding Object value (smi or strong heap object).
using ObjectSlot = SlotTraits<SlotLocation::kOnHeap>::TObjectSlot;
-// An MapWordSlot instance describes a kTaggedSize-sized on-heap field ("slot")
-// holding HeapObject (strong heap object) value or a forwarding pointer.
-using MapWordSlot = SlotTraits<SlotLocation::kOnHeap>::TMapWordSlot;
-
// A MaybeObjectSlot instance describes a kTaggedSize-sized on-heap field
// ("slot") holding MaybeObject (smi or weak heap object or strong heap object).
using MaybeObjectSlot = SlotTraits<SlotLocation::kOnHeap>::TMaybeObjectSlot;
@@ -1193,7 +1168,7 @@ enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
-enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
+enum RequiresBrandCheckFlag : uint8_t { kNoBrandCheck, kRequiresBrandCheck };
enum class InterpreterPushArgsMode : unsigned {
kArrayFunction,
@@ -1554,6 +1529,12 @@ constexpr int kFunctionLiteralIdTopLevel = 0;
constexpr int kSmallOrderedHashSetMinCapacity = 4;
constexpr int kSmallOrderedHashMapMinCapacity = 4;
+// Opaque data type for identifying stack frames. Used extensively
+// by the debugger.
+// ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that the enumeration
+// type has the correct value range (see Issue 830 for more details).
+enum StackFrameId { ID_MIN_VALUE = kMinInt, ID_MAX_VALUE = kMaxInt, NO_ID = 0 };
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/message-template.h b/deps/v8/src/common/message-template.h
index ae88aa4411..fedbfa5a10 100644
--- a/deps/v8/src/execution/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_EXECUTION_MESSAGE_TEMPLATE_H_
-#define V8_EXECUTION_MESSAGE_TEMPLATE_H_
+#ifndef V8_COMMON_MESSAGE_TEMPLATE_H_
+#define V8_COMMON_MESSAGE_TEMPLATE_H_
#include "src/base/logging.h"
@@ -90,6 +90,7 @@ namespace internal {
T(ImmutablePrototypeSet, \
"Immutable prototype object '%' cannot have their prototype set") \
T(ImportCallNotNewExpression, "Cannot use new with import") \
+ T(ImportOutsideModule, "Cannot use import statement outside a module") \
T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \
T(ImportMissingSpecifier, "import() requires a specifier") \
T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
@@ -415,6 +416,7 @@ namespace internal {
"Read of private field % from an object which did not contain the field") \
T(InvalidPrivateFieldWrite, \
"Write of private field % to an object which did not contain the field") \
+ T(InvalidPrivateMethodWrite, "Private method '%' is not writable") \
T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \
T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \
T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
@@ -495,7 +497,7 @@ namespace internal {
T(UnexpectedSuper, "'super' keyword unexpected here") \
T(UnexpectedNewTarget, "new.target expression is not allowed here") \
T(UnexpectedTemplateString, "Unexpected template string") \
- T(UnexpectedToken, "Unexpected token %") \
+ T(UnexpectedToken, "Unexpected token '%'") \
T(UnexpectedTokenUnaryExponentiation, \
"Unary operator used immediately before exponentiation expression. " \
"Parenthesis must be used to disambiguate operator precedence") \
@@ -562,6 +564,8 @@ namespace internal {
T(TraceEventPhaseError, "Trace event phase must be a number.") \
T(TraceEventIDError, "Trace event id must be a number.") \
/* Weak refs */ \
+ T(WeakRefsUnregisterTokenMustBeObject, \
+ "unregisterToken ('%') must be an object") \
T(WeakRefsCleanupMustBeCallable, \
"FinalizationGroup: cleanup must be callable") \
T(WeakRefsRegisterTargetMustBeObject, \
@@ -576,16 +580,16 @@ enum class MessageTemplate {
#define TEMPLATE(NAME, STRING) k##NAME,
MESSAGE_TEMPLATES(TEMPLATE)
#undef TEMPLATE
- kLastMessage
+ kMessageCount
};
inline MessageTemplate MessageTemplateFromInt(int message_id) {
- DCHECK_LE(0, message_id);
- DCHECK_LT(message_id, static_cast<int>(MessageTemplate::kLastMessage));
+ DCHECK_LT(static_cast<unsigned>(message_id),
+ static_cast<unsigned>(MessageTemplate::kMessageCount));
return static_cast<MessageTemplate>(message_id);
}
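The reworked DCHECK above uses a common idiom: casting the index to unsigned folds the two-sided check 0 <= id && id < count into a single comparison, because negative ids wrap around to very large unsigned values. A small self-contained illustration (kCount and InRange are made-up names):

  #include <cassert>

  constexpr int kCount = 42;  // stand-in for kMessageCount

  // One unsigned compare covers both bounds: negative ids wrap to huge values.
  bool InRange(int id) {
    return static_cast<unsigned>(id) < static_cast<unsigned>(kCount);
  }

  int main() {
    assert(InRange(0) && InRange(kCount - 1));
    assert(!InRange(-1) && !InRange(kCount));
  }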
} // namespace internal
} // namespace v8
-#endif // V8_EXECUTION_MESSAGE_TEMPLATE_H_
+#endif // V8_COMMON_MESSAGE_TEMPLATE_H_
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index fd0f97e904..00a79bb291 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -25,8 +25,12 @@ V8_INLINE Address GetIsolateRoot(TOnHeapAddress on_heap_addr);
template <>
V8_INLINE Address GetIsolateRoot<Address>(Address on_heap_addr) {
+ // We subtract 1 here in order to let the compiler generate an addition of a
+ // 32-bit signed constant instead of a 64-bit constant (the problem is that
+ // 2GB looks like a negative 32-bit value). This is correct because we never
+ // use the leftmost address of the V8 heap as |on_heap_addr|.
return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr +
- kPtrComprIsolateRootBias);
+ kPtrComprIsolateRootBias - 1);
}
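To make the bias-minus-one trick concrete: 2 GB (0x80000000) cannot be encoded as a 32-bit signed immediate, while 2 GB - 1 (0x7FFFFFFF) can, and subtracting 1 before rounding down only matters for the single address at the very start of the heap, which the comment rules out. A rough sketch under illustrative constants (not the exact V8 definitions):

  #include <cassert>
  #include <cstdint>

  constexpr uint64_t kAlign = uint64_t{1} << 32;  // illustrative 4 GB root alignment
  constexpr uint64_t kBias  = uint64_t{1} << 31;  // illustrative 2 GB root bias

  constexpr uint64_t RoundDown(uint64_t a) { return a & ~(kAlign - 1); }

  int main() {
    // 2 GB does not fit in a 32-bit signed immediate, but 2 GB - 1 does.
    assert(kBias > static_cast<uint64_t>(INT32_MAX));
    assert(kBias - 1 == static_cast<uint64_t>(INT32_MAX));

    // Subtracting 1 from the bias changes the rounded-down result only when
    // addr + kBias is an exact multiple of kAlign, i.e. the very first heap
    // address, which is never passed in.
    uint64_t root = 7 * kAlign;  // some 4 GB-aligned isolate root
    for (uint64_t addr = root - kBias + 1; addr < root + kBias; addr += kBias / 4) {
      assert(RoundDown(addr + kBias) == RoundDown(addr + kBias - 1));
    }
  }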
template <>
@@ -34,17 +38,10 @@ V8_INLINE Address GetIsolateRoot<Isolate*>(Isolate* isolate) {
return isolate->isolate_root();
}
-template <>
-V8_INLINE Address GetIsolateRoot<const Isolate*>(const Isolate* isolate) {
- return isolate->isolate_root();
-}
-
// Decompresses smi value.
V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) {
- // Current compression scheme requires |raw_value| to be sign-extended
- // from int32_t to intptr_t.
- intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
- return static_cast<Address>(value);
+ // For runtime code, the upper 32 bits of the Smi value do not matter.
+ return static_cast<Address>(raw_value);
}
// Decompresses weak or strong heap object pointer or forwarding pointer,
@@ -63,18 +60,18 @@ V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
template <typename TOnHeapAddress>
V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
- // Current compression scheme requires |raw_value| to be sign-extended
- // from int32_t to intptr_t.
- intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
- if (kUseBranchlessPtrDecompression) {
+ if (kUseBranchlessPtrDecompressionInRuntime) {
+ // Current compression scheme requires |raw_value| to be sign-extended
+ // from int32_t to intptr_t.
+ intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
// |root_mask| is 0 if the |value| was a smi or -1 otherwise.
Address root_mask = static_cast<Address>(-(value & kSmiTagMask));
Address root_or_zero = root_mask & GetIsolateRoot(on_heap_addr);
return root_or_zero + static_cast<Address>(value);
} else {
- return HAS_SMI_TAG(value)
- ? static_cast<Address>(value)
- : (GetIsolateRoot(on_heap_addr) + static_cast<Address>(value));
+ return HAS_SMI_TAG(raw_value)
+ ? DecompressTaggedSigned(raw_value)
+ : DecompressTaggedPointer(on_heap_addr, raw_value);
}
}
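For readers unfamiliar with the two decompression flavors toggled by the new constants, the branchless path masks the isolate root in or out using the Smi tag bit, while the branchful path simply tests the tag and dispatches to the signed or pointer case, as the new code above does. A standalone sketch of both, assuming V8's default tagging where the low bit is 0 for Smis:

  #include <cstdint>

  using Address = uintptr_t;
  constexpr Address kSmiTagMask = 1;  // assumption: Smis have a 0 low bit

  // Branchless: build an all-ones mask from the tag bit so the isolate root is
  // added only for heap object pointers.
  Address DecompressBranchless(Address isolate_root, uint32_t raw) {
    intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw));  // sign-extend
    Address root_mask = static_cast<Address>(-(value & kSmiTagMask));   // 0 or ~0
    return (root_mask & isolate_root) + static_cast<Address>(value);
  }

  // Branchful: test the tag and take one of two simple paths; for Smis the
  // upper 32 bits of the result do not matter.
  Address DecompressBranchful(Address isolate_root, uint32_t raw) {
    if ((raw & kSmiTagMask) == 0) return static_cast<Address>(raw);
    return isolate_root + static_cast<Address>(static_cast<int32_t>(raw));
  }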
diff --git a/deps/v8/src/compiler-dispatcher/OWNERS b/deps/v8/src/compiler-dispatcher/OWNERS
new file mode 100644
index 0000000000..9664a4857c
--- /dev/null
+++ b/deps/v8/src/compiler-dispatcher/OWNERS
@@ -0,0 +1,7 @@
+ahaas@chromium.org
+jkummerow@chromium.org
+leszeks@chromium.org
+mstarzinger@chromium.org
+rmcilroy@chromium.org
+
+# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 39beced3f3..50e2af7129 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
bmeurer@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
@@ -19,6 +17,7 @@ per-file wasm-*=gdeepti@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
-per-file simd-scalar-lowering.*=aseemgarg@chromium.org
+per-file simd-scalar-lowering.*=bbudge@chromium.org
+per-file simd-scalar-lowering.*=gdeepti@chromium.org
# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/STYLE b/deps/v8/src/compiler/STYLE
deleted file mode 100644
index ae41e3f989..0000000000
--- a/deps/v8/src/compiler/STYLE
+++ /dev/null
@@ -1,29 +0,0 @@
-Compiler Coding Style
-=====================
-
-Coding style for the TurboFan compiler generally follows the Google C++ Style
-Guide and the Chromium Coding Style. The notes below are usually just extensions
-beyond what the Google style guide already says. If this document doesn't
-mention a rule, follow the Google C++ style.
-
-
-TODOs
------
-We use the following convention for putting TODOs into the code:
-
- * A TODO(turbofan) implies a performance improvement opportunity.
- * A TODO(name) implies an incomplete implementation.
-
-
-Use of C++11 auto keyword
--------------------------
-Use auto to avoid type names that are just clutter. Continue to use manifest
-type declarations when it helps readability, and never use auto for anything
-but local variables, in particular auto should only be used where it is obvious
-from context what the type is:
-
- for (auto block : x->blocks()) // clearly a Block of some kind
- for (auto instr : x->instructions()) // clearly an Instruction of some kind
-
- for (auto b : x->predecessors()) // less clear, better to make it explicit
- for (BasicBlock* b : x->predecessors()) // now clear
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 726a81a465..a369de4885 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -14,9 +14,9 @@
#include "src/objects/heap-number.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-generator.h"
-#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/objects/source-text-module.h"
namespace v8 {
namespace internal {
@@ -72,6 +72,26 @@ FieldAccess AccessBuilder::ForBigIntBitfield() {
}
// static
+FieldAccess AccessBuilder::ForBigIntOptionalPadding() {
+ DCHECK_EQ(FIELD_SIZE(BigInt::kOptionalPaddingOffset), 4);
+ FieldAccess access = {
+ kTaggedBase, BigInt::kOptionalPaddingOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kInt32, MachineType::Uint32(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForBigIntLeastSignificantDigit64() {
+ DCHECK_EQ(BigInt::SizeFor(1) - BigInt::SizeFor(0), 8);
+ FieldAccess access = {
+ kTaggedBase, BigInt::kDigitsOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kBigUint64, MachineType::Uint64(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
FieldAccess access = {
kTaggedBase, JSObject::kPropertiesOrHashOffset,
@@ -626,7 +646,7 @@ FieldAccess AccessBuilder::ForMapPrototype() {
// static
FieldAccess AccessBuilder::ForModuleRegularExports() {
FieldAccess access = {
- kTaggedBase, Module::kRegularExportsOffset,
+ kTaggedBase, SourceTextModule::kRegularExportsOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
@@ -636,7 +656,7 @@ FieldAccess AccessBuilder::ForModuleRegularExports() {
// static
FieldAccess AccessBuilder::ForModuleRegularImports() {
FieldAccess access = {
- kTaggedBase, Module::kRegularImportsOffset,
+ kTaggedBase, SourceTextModule::kRegularImportsOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
@@ -847,7 +867,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
// static
FieldAccess AccessBuilder::ForValue() {
FieldAccess access = {
- kTaggedBase, JSValue::kValueOffset,
+ kTaggedBase, JSPrimitiveWrapper::kValueOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::NonInternal(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index e38c487b1a..e3a17fe257 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -42,6 +42,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to BigInt's bit field.
static FieldAccess ForBigIntBitfield();
+ // Provides access to BigInt's 32-bit padding that is placed after the
+ // bitfield on 64-bit architectures without pointer compression. Do not use
+ // this on 32-bit architectures.
+ static FieldAccess ForBigIntOptionalPadding();
+
+ // Provides access to BigInt's least significant digit on 64-bit
+ // architectures. Do not use this on 32-bit architectures.
+ static FieldAccess ForBigIntLeastSignificantDigit64();
+
// Provides access to JSObject::properties() field.
static FieldAccess ForJSObjectPropertiesOrHash();
@@ -263,7 +272,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSStringIterator::index() field.
static FieldAccess ForJSStringIteratorIndex();
- // Provides access to JSValue::value() field.
+ // Provides access to JSPrimitiveWrapper::value() field.
static FieldAccess ForValue();
// Provides access to Cell::value() field.
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 713484f734..6fc9e8214e 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -8,6 +8,7 @@
#include "src/builtins/accessors.h"
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/compilation-dependency.h"
#include "src/compiler/type-cache.h"
#include "src/ic/call-optimization.h"
#include "src/logging/counters.h"
@@ -78,7 +79,7 @@ PropertyAccessInfo PropertyAccessInfo::NotFound(Zone* zone,
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&& dependencies,
+ ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
@@ -90,7 +91,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
// static
PropertyAccessInfo PropertyAccessInfo::DataConstant(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&& dependencies,
+ ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
@@ -156,8 +157,7 @@ PropertyAccessInfo::PropertyAccessInfo(
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map,
ZoneVector<Handle<Map>>&& receiver_maps,
- ZoneVector<CompilationDependencies::Dependency const*>&&
- unrecorded_dependencies)
+ ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
: kind_(kind),
receiver_maps_(receiver_maps),
unrecorded_dependencies_(std::move(unrecorded_dependencies)),
@@ -258,11 +258,6 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
}
}
-Handle<Cell> PropertyAccessInfo::export_cell() const {
- DCHECK_EQ(kModuleExport, kind_);
- return Handle<Cell>::cast(constant_);
-}
-
AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
CompilationDependencies* dependencies,
Zone* zone)
@@ -336,11 +331,10 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
Type field_type = Type::NonInternal();
MaybeHandle<Map> field_map;
MapRef map_ref(broker(), map);
- ZoneVector<CompilationDependencies::Dependency const*>
- unrecorded_dependencies(zone());
+ ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
+ map_ref.SerializeOwnDescriptor(descriptor);
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
- map_ref.SerializeOwnDescriptor(descriptor);
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
descriptor));
@@ -360,19 +354,23 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// The field type was cleared by the GC, so we don't know anything
// about the contents now.
}
- map_ref.SerializeOwnDescriptor(descriptor);
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
descriptor));
if (descriptors_field_type->IsClass()) {
- unrecorded_dependencies.push_back(
- dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor));
// Remember the field map, and try to infer a useful type.
Handle<Map> map(descriptors_field_type->AsClass(), isolate());
field_type = Type::For(MapRef(broker(), map));
field_map = MaybeHandle<Map>(map);
}
+ } else {
+ CHECK(details_representation.IsTagged());
}
+ // TODO(turbofan): We may want to do this only depending on the use
+ // of the access info.
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor));
+
PropertyConstness constness;
if (details.IsReadOnly() && !details.IsConfigurable()) {
constness = PropertyConstness::kConst;
@@ -445,9 +443,6 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
holder.is_null());
DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound, !holder.is_null());
- if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) {
- return PropertyAccessInfo::Invalid(zone());
- }
}
if (access_mode == AccessMode::kLoad) {
Handle<Name> cached_property_name;
@@ -569,7 +564,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
if (map_prototype->map().is_deprecated()) {
// Try to migrate the prototype object so we don't embed the deprecated
// map into the optimized code.
- JSObject::TryMigrateInstance(map_prototype);
+ JSObject::TryMigrateInstance(isolate(), map_prototype);
}
map = handle(map_prototype->map(), isolate());
holder = map_prototype;
@@ -611,8 +606,7 @@ void AccessInfoFactory::ComputePropertyAccessInfos(
void PropertyAccessInfo::RecordDependencies(
CompilationDependencies* dependencies) {
- for (CompilationDependencies::Dependency const* d :
- unrecorded_dependencies_) {
+ for (CompilationDependency const* d : unrecorded_dependencies_) {
dependencies->RecordDependency(d);
}
unrecorded_dependencies_.clear();
@@ -648,6 +642,8 @@ void AccessInfoFactory::MergePropertyAccessInfos(
CHECK(!result->empty());
}
+Isolate* AccessInfoFactory::isolate() const { return broker()->isolate(); }
+
namespace {
Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
@@ -760,8 +756,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
Type field_type = Type::NonInternal();
MaybeHandle<Map> field_map;
MapRef transition_map_ref(broker(), transition_map);
- ZoneVector<CompilationDependencies::Dependency const*>
- unrecorded_dependencies(zone());
+ ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
transition_map_ref.SerializeOwnDescriptor(number);
@@ -796,6 +791,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
unrecorded_dependencies.push_back(
dependencies()->TransitionDependencyOffTheRecord(
MapRef(broker(), transition_map)));
+ transition_map_ref.SerializeBackPointer(); // For BuildPropertyStore.
// Transitioning stores *may* store to const fields. The resulting
// DataConstant access infos can be distinguished from later, i.e. redundant,
// stores to the same constant field by the presence of a transition map.
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 3499069fc4..4c7c3611df 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -8,7 +8,6 @@
#include <iosfwd>
#include "src/codegen/machine-type.h"
-#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/types.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/field-index.h"
@@ -25,8 +24,10 @@ class Factory;
namespace compiler {
// Forward declarations.
+class CompilationDependencies;
+class CompilationDependency;
class ElementAccessFeedback;
-class Type;
+class JSHeapBroker;
class TypeCache;
std::ostream& operator<<(std::ostream&, AccessMode);
@@ -74,16 +75,14 @@ class PropertyAccessInfo final {
MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&&
- unrecorded_dependencies,
+ ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
static PropertyAccessInfo DataConstant(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&&
- unrecorded_dependencies,
+ ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
@@ -113,9 +112,9 @@ class PropertyAccessInfo final {
Kind kind() const { return kind_; }
MaybeHandle<JSObject> holder() const {
- // This CHECK tries to protect against using the access info without
- // recording its dependencies first.
- CHECK(unrecorded_dependencies_.empty());
+ // TODO(neis): There was a CHECK here that tries to protect against
+ // using the access info without recording its dependencies first.
+ // Find a more suitable place for it.
return holder_;
}
MaybeHandle<Map> transition_map() const { return transition_map_; }
@@ -127,7 +126,6 @@ class PropertyAccessInfo final {
ZoneVector<Handle<Map>> const& receiver_maps() const {
return receiver_maps_;
}
- Handle<Cell> export_cell() const;
private:
explicit PropertyAccessInfo(Zone* zone);
@@ -136,17 +134,16 @@ class PropertyAccessInfo final {
PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
Handle<Object> constant,
ZoneVector<Handle<Map>>&& receiver_maps);
- PropertyAccessInfo(
- Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
- FieldIndex field_index, Representation field_representation,
- Type field_type, MaybeHandle<Map> field_map,
- ZoneVector<Handle<Map>>&& receiver_maps,
- ZoneVector<CompilationDependencies::Dependency const*>&& dependencies);
+ PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map, FieldIndex field_index,
+ Representation field_representation, Type field_type,
+ MaybeHandle<Map> field_map,
+ ZoneVector<Handle<Map>>&& receiver_maps,
+ ZoneVector<CompilationDependency const*>&& dependencies);
Kind kind_;
ZoneVector<Handle<Map>> receiver_maps_;
- ZoneVector<CompilationDependencies::Dependency const*>
- unrecorded_dependencies_;
+ ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
Handle<Object> constant_;
MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
@@ -215,7 +212,7 @@ class AccessInfoFactory final {
CompilationDependencies* dependencies() const { return dependencies_; }
JSHeapBroker* broker() const { return broker_; }
- Isolate* isolate() const { return broker()->isolate(); }
+ Isolate* isolate() const;
Zone* zone() const { return zone_; }
JSHeapBroker* const broker_;
diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.cc b/deps/v8/src/compiler/add-type-assertions-reducer.cc
new file mode 100644
index 0000000000..59d2fe6820
--- /dev/null
+++ b/deps/v8/src/compiler/add-type-assertions-reducer.cc
@@ -0,0 +1,51 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/add-type-assertions-reducer.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AddTypeAssertionsReducer::AddTypeAssertionsReducer(Editor* editor,
+ JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ visited_(jsgraph->graph()->NodeCount(), zone) {}
+
+AddTypeAssertionsReducer::~AddTypeAssertionsReducer() = default;
+
+Reduction AddTypeAssertionsReducer::Reduce(Node* node) {
+ if (node->opcode() == IrOpcode::kAssertType ||
+ node->opcode() == IrOpcode::kPhi || !NodeProperties::IsTyped(node) ||
+ visited_.Get(node)) {
+ return NoChange();
+ }
+ visited_.Set(node, true);
+
+ Type type = NodeProperties::GetType(node);
+ if (!type.IsRange()) {
+ return NoChange();
+ }
+
+ Node* assertion = graph()->NewNode(simplified()->AssertType(type), node);
+ NodeProperties::SetType(assertion, type);
+
+ for (Edge edge : node->use_edges()) {
+ Node* const user = edge.from();
+ DCHECK(!user->IsDead());
+ if (NodeProperties::IsValueEdge(edge) && user != assertion) {
+ edge.UpdateTo(assertion);
+ Revisit(user);
+ }
+ }
+
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.h b/deps/v8/src/compiler/add-type-assertions-reducer.h
new file mode 100644
index 0000000000..36add040e1
--- /dev/null
+++ b/deps/v8/src/compiler/add-type-assertions-reducer.h
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
+#define V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
+
+#include "src/common/globals.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+class V8_EXPORT_PRIVATE AddTypeAssertionsReducer final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+ AddTypeAssertionsReducer(Editor* editor, JSGraph* jsgraph, Zone* zone);
+ ~AddTypeAssertionsReducer() final;
+
+ const char* reducer_name() const override {
+ return "AddTypeAssertionsReducer";
+ }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ JSGraph* const jsgraph_;
+ NodeAuxData<bool> visited_;
+
+ Graph* graph() { return jsgraph_->graph(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
+
+ DISALLOW_COPY_AND_ASSIGN(AddTypeAssertionsReducer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index d93053c64b..88a9c52a33 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -130,6 +130,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return Operand::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
case Constant::kInt64:
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// TODO(dcarney): loading RPO constants on arm.
case Constant::kRpoNumber:
@@ -308,9 +309,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode,
+ ArmOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -319,9 +320,10 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
}
}
-void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter& i, Register address) {
+void ComputePoisonedAddressForLoad(
+ CodeGenerator* codegen, InstructionCode opcode,
+ ArmOperandConverter& i, // NOLINT(runtime/references)
+ Register address) {
DCHECK_EQ(kMemoryAccessPoisoned,
static_cast<MemoryAccessMode>(MiscField::decode(opcode)));
switch (AddressingModeField::decode(opcode)) {
@@ -711,8 +713,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -879,23 +881,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == r1);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
@@ -1752,6 +1752,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kArmDmbIsh: {
+ __ dmb(ISH);
+ break;
+ }
case kArmDsbIsb: {
__ dsb(SY);
__ isb(SY);
@@ -2588,6 +2592,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmax(NeonU32, scratch, src.low(), src.high());
__ vpmax(NeonU32, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x4AllTrue: {
@@ -2597,6 +2603,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmin(NeonU32, scratch, src.low(), src.high());
__ vpmin(NeonU32, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x8AnyTrue: {
@@ -2607,6 +2615,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmax(NeonU16, scratch, scratch, scratch);
__ vpmax(NeonU16, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x8AllTrue: {
@@ -2617,6 +2627,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmin(NeonU16, scratch, scratch, scratch);
__ vpmin(NeonU16, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x16AnyTrue: {
@@ -2631,6 +2643,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kDoubleRegZero is not changed, since it is 0.
__ vtst(Neon32, q_scratch, q_scratch, q_scratch);
__ ExtractLane(i.OutputRegister(), d_scratch, NeonS32, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x16AllTrue: {
@@ -2642,6 +2656,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmin(NeonU8, scratch, scratch, scratch);
__ vpmin(NeonU8, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS8, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kWord32AtomicLoadInt8:
@@ -2901,7 +2917,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -2993,8 +3009,14 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(lr, fp);
- __ mov(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ } else {
+ __ Push(lr, fp);
+ __ mov(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3025,8 +3047,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3074,7 +3096,7 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ __ stop();
}
__ bind(&done);
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 722502edc7..165ca39f9d 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -126,6 +126,7 @@ namespace compiler {
V(ArmPush) \
V(ArmPoke) \
V(ArmPeek) \
+ V(ArmDmbIsh) \
V(ArmDsbIsb) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 211abd85b8..41d7b4055f 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -275,6 +275,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmStr:
case kArmPush:
case kArmPoke:
+ case kArmDmbIsh:
case kArmDsbIsb:
case kArmWord32AtomicPairStore:
case kArmWord32AtomicPairAdd:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 678d75ae5e..06aba4491a 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -441,9 +441,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -2020,6 +2020,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(right));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmDmbIsh, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 53864ad2e9..c71a63cc3d 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -224,6 +224,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value()));
case Constant::kExternalReference:
return Operand(constant.ToExternalReference());
+ case Constant::kCompressedHeapObject: // Fall through.
case Constant::kHeapObject:
return Operand(constant.ToHeapObject());
case Constant::kDelayedStringConstant:
@@ -375,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- Arm64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ Arm64OperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -621,8 +622,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -793,19 +794,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0).is(x1));
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ Debug("kArchDebugAbort", 0, BREAK);
+ __ Debug("kArchAbortCSAAssert", 0, BREAK);
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
@@ -867,9 +866,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
__ StoreTaggedField(value, MemOperand(object, offset));
- if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(object, object);
- }
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
eq, ool->entry());
__ Bind(ool->exit());
@@ -1629,6 +1625,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
+ case kArm64DmbIsh:
+ __ Dmb(InnerShareable, BarrierAll);
+ break;
case kArm64DsbIsb:
__ Dsb(FullSystem, BarrierAll);
__ Isb();
@@ -2200,6 +2199,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister temp = scope.AcquireV(format); \
__ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
__ Umov(i.OutputRegister32(), temp, 0); \
+ __ Cmp(i.OutputRegister32(), 0); \
+ __ Cset(i.OutputRegister32(), ne); \
break; \
}
SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
@@ -2399,12 +2400,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Adr(temp, &table);
__ Add(temp, temp, Operand(input, UXTW, 2));
__ Br(temp);
- __ StartBlockPools();
- __ Bind(&table);
- for (size_t index = 0; index < case_count; ++index) {
- __ B(GetLabel(i.InputRpo(index + 2)));
+ {
+ TurboAssembler::BlockPoolsScope block_pools(tasm(),
+ case_count * kInstrSize);
+ __ Bind(&table);
+ for (size_t index = 0; index < case_count; ++index) {
+ __ B(GetLabel(i.InputRpo(index + 2)));
+ }
}
- __ EndBlockPools();
}
void CodeGenerator::FinishFrame(Frame* frame) {
@@ -2437,8 +2440,8 @@ void CodeGenerator::AssembleConstructFrame() {
// The frame has been previously padded in CodeGenerator::FinishFrame().
DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
call_descriptor->CalleeSavedRegisters());
@@ -2577,7 +2580,17 @@ void CodeGenerator::AssembleConstructFrame() {
MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
} break;
case CallDescriptor::kCallAddress:
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ required_slots += 2; // marker + saved c_entry_fp.
+ }
__ Claim(required_slots);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
+ __ Str(scratch,
+ MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+ }
break;
default:
UNREACHABLE();
@@ -2654,7 +2667,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
-void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
+void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2669,6 +2682,18 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
__ Mov(dst, src_object);
}
+ } else if (src.type() == Constant::kCompressedHeapObject) {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ // TODO(v8:8977): Even though this mov happens on 32 bits (Note the
+ // .W()) and we are passing along the RelocInfo, we still haven't made
+ // the address embedded in the code-stream actually be compressed.
+ __ Mov(dst.W(),
+ Immediate(src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
+ }
} else {
__ Mov(dst, g.ToImmediate(source));
}
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 4b7b017111..1c4c0e3335 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -171,6 +171,7 @@ namespace compiler {
V(Arm64CompressSigned) \
V(Arm64CompressPointer) \
V(Arm64CompressAny) \
+ V(Arm64DmbIsh) \
V(Arm64DsbIsb) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 502b9d7d82..8344887ec2 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -319,6 +319,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64StrW:
case kArm64Str:
case kArm64StrCompressTagged:
+ case kArm64DmbIsh:
case kArm64DsbIsb:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 69d82b4993..a953e35a66 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -535,9 +535,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -676,10 +676,11 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
- // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
- // must check kArithmeticImm as well as kLoadStoreImm64.
- if (g.CanBeImmediate(index, kArithmeticImm) &&
- g.CanBeImmediate(index, kLoadStoreImm64)) {
+ // OutOfLineRecordWrite uses the index in an add or sub instruction, but we
+ // can trust the assembler to generate extra instructions if the index does
+ // not fit into add or sub, so here we only check the immediate for a store.
+ if (g.CanBeImmediate(index, COMPRESS_POINTERS_BOOL ? kLoadStoreImm32
+ : kLoadStoreImm64)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
@@ -1599,7 +1600,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
// 32-bit operations will write their result in a W register (implicitly
// clearing the top 32-bit of the corresponding X register) so the
// zero-extension is a no-op.
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
return;
}
case IrOpcode::kLoad: {
@@ -1610,7 +1611,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
return;
default:
break;
@@ -1646,29 +1647,75 @@ void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.UseRegister(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressed);
+ InstructionCode opcode = kArm64LdrDecompressAnyTagged;
+ if (value->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+ ImmediateMode immediate_mode = kLoadStoreImm32;
+ MachineRepresentation rep = MachineRepresentation::kCompressed;
+ EmitLoad(this, value, opcode, immediate_mode, rep, node);
+ } else {
+ Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.UseRegister(value));
+ }
}
void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kArm64DecompressPointer, g.DefineAsRegister(node), g.UseRegister(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedPointer);
+ InstructionCode opcode = kArm64LdrDecompressTaggedPointer;
+ if (value->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+ ImmediateMode immediate_mode = kLoadStoreImm32;
+ MachineRepresentation rep = MachineRepresentation::kCompressedPointer;
+ EmitLoad(this, value, opcode, immediate_mode, rep, node);
+ } else {
+ Emit(kArm64DecompressPointer, g.DefineAsRegister(node),
+ g.UseRegister(value));
+ }
}
void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kArm64DecompressSigned, g.DefineAsRegister(node), g.UseRegister(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedSigned);
+ InstructionCode opcode = kArm64LdrDecompressTaggedSigned;
+ if (value->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+ ImmediateMode immediate_mode = kLoadStoreImm32;
+ MachineRepresentation rep = MachineRepresentation::kCompressedSigned;
+ EmitLoad(this, value, opcode, immediate_mode, rep, node);
+ } else {
+ Emit(kArm64DecompressSigned, g.DefineAsRegister(node),
+ g.UseRegister(value));
+ }
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
- Node* value = node->InputAt(0);
// The top 32 bits in the 64-bit register will be undefined, and
// must not be used by a dependent node.
- Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
+ EmitIdentity(node);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
@@ -2451,7 +2498,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count();
size_t lookup_time_cost = sw.case_count();
- if (sw.case_count() > 0 &&
+ if (sw.case_count() > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value() > std::numeric_limits<int32_t>::min() &&
@@ -2755,6 +2802,11 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
return VisitRRR(this, kArm64Float64Mul, node);
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64DmbIsh, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index bb83a8497b..9ce92dadaa 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -1210,6 +1210,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
literal = DeoptimizationLiteral(constant.ToHeapObject());
break;
+ case Constant::kCompressedHeapObject:
+ DCHECK_EQ(MachineRepresentation::kCompressed, type.representation());
+ literal = DeoptimizationLiteral(constant.ToHeapObject());
+ break;
case Constant::kDelayedStringConstant:
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
literal = DeoptimizationLiteral(constant.ToDelayedStringConstant());
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 0e61c22cbb..ed4be7a47c 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -81,6 +81,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Immediate(constant.ToExternalReference());
case Constant::kHeapObject:
return Immediate(constant.ToHeapObject());
+ case Constant::kCompressedHeapObject:
+ break;
case Constant::kDelayedStringConstant:
return Immediate::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
@@ -462,6 +464,19 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \
}
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
+ do { \
+ Register dst = i.OutputRegister(); \
+ Operand src = i.InputOperand(0); \
+ Register tmp = i.TempRegister(0); \
+ __ mov(tmp, Immediate(1)); \
+ __ xor_(dst, dst); \
+ __ Pxor(kScratchDoubleReg, kScratchDoubleReg); \
+ __ opcode(kScratchDoubleReg, src); \
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg); \
+ __ cmov(zero, dst, tmp); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@@ -674,8 +689,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!HasImmediateInput(instr, 0));
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -870,17 +885,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == edx);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
__ int3();
break;
@@ -1204,7 +1217,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchWordPoisonOnSpeculation:
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
- case kLFence:
+ case kIA32MFence:
+ __ mfence();
+ break;
+ case kIA32LFence:
__ lfence();
break;
case kSSEFloat32Cmp:
@@ -3663,18 +3679,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmov(zero, dst, tmp);
break;
}
+ // We need to split up the different lane structures because the comparison
+ // instruction used matters: e.g. given 0xff00, pcmpeqb returns 0x0011,
+ // pcmpeqw returns 0x0000, and ptest will set ZF to 0 and 1,
+ // respectively.
case kIA32S1x4AllTrue:
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
+ break;
case kIA32S1x8AllTrue:
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
+ break;
case kIA32S1x16AllTrue: {
- Register dst = i.OutputRegister();
- Operand src = i.InputOperand(0);
- Register tmp = i.TempRegister(0);
- __ mov(tmp, Immediate(1));
- __ xor_(dst, dst);
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(kScratchDoubleReg, src);
- __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
- __ cmov(zero, dst, tmp);
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
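To see why AllTrue must compare at the operation's own lane width, take the comment's example: a 16-bit lane holding 0xff00 is nonzero (true), but its low byte is zero, so a byte-wide compare-against-zero would produce a nonzero mask and flip the result. A scalar sketch of the difference (purely illustrative):

  #include <cstdint>

  // AllTrue over one 16-bit lane: true iff the lane is nonzero.
  bool AllTrue16(uint16_t lane) { return lane != 0; }

  // Emulating the same lane with byte-wide compares, as pcmpeqb would: either
  // byte being zero makes the "equal to zero" mask nonzero, so ptest would
  // clear ZF and the result would wrongly become false for 0xff00.
  bool AllTrue16ViaBytes(uint16_t lane) {
    uint8_t lo = static_cast<uint8_t>(lane);
    uint8_t hi = static_cast<uint8_t>(lane >> 8);
    return lo != 0 && hi != 0;  // differs from AllTrue16 when exactly one byte is zero
  }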
case kIA32StackCheck: {
@@ -4224,6 +4240,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsCFunctionCall()) {
__ push(ebp);
__ mov(ebp, esp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)));
+ // Reserve stack space for saving the c_entry_fp later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -4254,8 +4275,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -4629,6 +4650,7 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
#undef ASSEMBLE_MOVX
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
+#undef ASSEMBLE_SIMD_ALL_TRUE
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 60ed1cc29c..56dea82fe2 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -44,7 +44,8 @@ namespace compiler {
V(IA32Tzcnt) \
V(IA32Popcnt) \
V(IA32Bswap) \
- V(LFence) \
+ V(IA32MFence) \
+ V(IA32LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index f2d5cc0d17..15f69b991c 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -365,7 +365,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32PushFloat64:
case kIA32PushSimd128:
case kIA32Poke:
- case kLFence:
+ case kIA32MFence:
+ case kIA32LFence:
return kHasSideEffect;
case kIA32Word32AtomicPairLoad:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index f81b88823e..e1fc66b4ba 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -272,9 +272,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
IA32OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -1593,6 +1593,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32MFence, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 068164b57e..1085de2196 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -82,7 +82,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchLookupSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
- V(ArchDebugAbort) \
+ V(ArchAbortCSAAssert) \
V(ArchDebugBreak) \
V(ArchComment) \
V(ArchThrowTerminator) \
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index b0637c175d..538af71bb4 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -298,7 +298,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchTailCallCodeObject:
case kArchTailCallAddress:
case kArchTailCallWasm:
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
case kArchDebugBreak:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h
index 21edc2f503..a3f62e7ba4 100644
--- a/deps/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h
@@ -29,8 +29,8 @@ inline bool operator<(const CaseInfo& l, const CaseInfo& r) {
// Helper struct containing data about a table or lookup switch.
class SwitchInfo {
public:
- SwitchInfo(ZoneVector<CaseInfo>& cases, int32_t min_value, int32_t max_value,
- BasicBlock* default_branch)
+ SwitchInfo(ZoneVector<CaseInfo>& cases, // NOLINT(runtime/references)
+ int32_t min_value, int32_t max_value, BasicBlock* default_branch)
: cases_(cases),
min_value_(min_value),
max_value_(max_value),
@@ -109,13 +109,9 @@ class OperandGenerator {
}
InstructionOperand DefineAsConstant(Node* node) {
- return DefineAsConstant(node, ToConstant(node));
- }
-
- InstructionOperand DefineAsConstant(Node* node, Constant constant) {
selector()->MarkAsDefined(node);
int virtual_register = GetVReg(node);
- sequence()->AddConstant(virtual_register, constant);
+ sequence()->AddConstant(virtual_register, ToConstant(node));
return ConstantOperand(virtual_register);
}
@@ -326,6 +322,8 @@ class OperandGenerator {
}
case IrOpcode::kHeapConstant:
return Constant(HeapConstantOf(node->op()));
+ case IrOpcode::kCompressedHeapConstant:
+ return Constant(HeapConstantOf(node->op()), true);
case IrOpcode::kDelayedStringConstant:
return Constant(StringConstantBaseOf(node->op()));
case IrOpcode::kDeadValue: {
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 2b748a188b..11ba910405 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -8,6 +8,7 @@
#include "src/base/adapters.h"
#include "src/codegen/assembler-inl.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
@@ -24,7 +25,7 @@ InstructionSelector::InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
- EnableSwitchJumpTable enable_switch_jump_table,
+ EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
SourcePositionMode source_position_mode, Features features,
EnableScheduling enable_scheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing,
@@ -54,7 +55,8 @@ InstructionSelector::InstructionSelector(
frame_(frame),
instruction_selection_failed_(false),
instr_origins_(sequence->zone()),
- trace_turbo_(trace_turbo) {
+ trace_turbo_(trace_turbo),
+ tick_counter_(tick_counter) {
instructions_.reserve(node_count);
continuation_inputs_.reserve(5);
continuation_outputs_.reserve(2);
@@ -1078,7 +1080,8 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
node->opcode() == IrOpcode::kProtectedLoad ||
- node->opcode() == IrOpcode::kProtectedStore) {
+ node->opcode() == IrOpcode::kProtectedStore ||
+ node->opcode() == IrOpcode::kMemoryBarrier) {
++effect_level;
}
}
@@ -1251,6 +1254,7 @@ void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
}
void InstructionSelector::VisitNode(Node* node) {
+ tick_counter_->DoTick();
DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
switch (node->opcode()) {
case IrOpcode::kStart:
@@ -1301,6 +1305,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
return MarkAsReference(node), VisitConstant(node);
+ case IrOpcode::kCompressedHeapConstant:
+ return MarkAsCompressed(node), VisitConstant(node);
case IrOpcode::kNumberConstant: {
double value = OpParameter<double>(node->op());
if (!IsSmiDouble(value)) MarkAsReference(node);
@@ -1324,8 +1330,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
return;
- case IrOpcode::kDebugAbort:
- VisitDebugAbort(node);
+ case IrOpcode::kAbortCSAAssert:
+ VisitAbortCSAAssert(node);
return;
case IrOpcode::kDebugBreak:
VisitDebugBreak(node);
@@ -1474,6 +1480,7 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
VisitBitcastTaggedToWord(node);
case IrOpcode::kBitcastWordToTagged:
@@ -1734,6 +1741,8 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairSar(node);
+ case IrOpcode::kMemoryBarrier:
+ return VisitMemoryBarrier(node);
case IrOpcode::kWord32AtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -1808,6 +1817,24 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
+ case IrOpcode::kF64x2Splat:
+ return MarkAsSimd128(node), VisitF64x2Splat(node);
+ case IrOpcode::kF64x2ExtractLane:
+ return MarkAsFloat64(node), VisitF64x2ExtractLane(node);
+ case IrOpcode::kF64x2ReplaceLane:
+ return MarkAsSimd128(node), VisitF64x2ReplaceLane(node);
+ case IrOpcode::kF64x2Abs:
+ return MarkAsSimd128(node), VisitF64x2Abs(node);
+ case IrOpcode::kF64x2Neg:
+ return MarkAsSimd128(node), VisitF64x2Neg(node);
+ case IrOpcode::kF64x2Eq:
+ return MarkAsSimd128(node), VisitF64x2Eq(node);
+ case IrOpcode::kF64x2Ne:
+ return MarkAsSimd128(node), VisitF64x2Ne(node);
+ case IrOpcode::kF64x2Lt:
+ return MarkAsSimd128(node), VisitF64x2Lt(node);
+ case IrOpcode::kF64x2Le:
+ return MarkAsSimd128(node), VisitF64x2Le(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -1846,6 +1873,38 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Lt(node);
case IrOpcode::kF32x4Le:
return MarkAsSimd128(node), VisitF32x4Le(node);
+ case IrOpcode::kI64x2Splat:
+ return MarkAsSimd128(node), VisitI64x2Splat(node);
+ case IrOpcode::kI64x2ExtractLane:
+ return MarkAsWord64(node), VisitI64x2ExtractLane(node);
+ case IrOpcode::kI64x2ReplaceLane:
+ return MarkAsSimd128(node), VisitI64x2ReplaceLane(node);
+ case IrOpcode::kI64x2Neg:
+ return MarkAsSimd128(node), VisitI64x2Neg(node);
+ case IrOpcode::kI64x2Shl:
+ return MarkAsSimd128(node), VisitI64x2Shl(node);
+ case IrOpcode::kI64x2ShrS:
+ return MarkAsSimd128(node), VisitI64x2ShrS(node);
+ case IrOpcode::kI64x2Add:
+ return MarkAsSimd128(node), VisitI64x2Add(node);
+ case IrOpcode::kI64x2Sub:
+ return MarkAsSimd128(node), VisitI64x2Sub(node);
+ case IrOpcode::kI64x2Mul:
+ return MarkAsSimd128(node), VisitI64x2Mul(node);
+ case IrOpcode::kI64x2Eq:
+ return MarkAsSimd128(node), VisitI64x2Eq(node);
+ case IrOpcode::kI64x2Ne:
+ return MarkAsSimd128(node), VisitI64x2Ne(node);
+ case IrOpcode::kI64x2GtS:
+ return MarkAsSimd128(node), VisitI64x2GtS(node);
+ case IrOpcode::kI64x2GeS:
+ return MarkAsSimd128(node), VisitI64x2GeS(node);
+ case IrOpcode::kI64x2ShrU:
+ return MarkAsSimd128(node), VisitI64x2ShrU(node);
+ case IrOpcode::kI64x2GtU:
+ return MarkAsSimd128(node), VisitI64x2GtU(node);
+ case IrOpcode::kI64x2GeU:
+ return MarkAsSimd128(node), VisitI64x2GeU(node);
case IrOpcode::kI32x4Splat:
return MarkAsSimd128(node), VisitI32x4Splat(node);
case IrOpcode::kI32x4ExtractLane:
@@ -2028,6 +2087,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS128Select(node);
case IrOpcode::kS8x16Shuffle:
return MarkAsSimd128(node), VisitS8x16Shuffle(node);
+ case IrOpcode::kS1x2AnyTrue:
+ return MarkAsWord32(node), VisitS1x2AnyTrue(node);
+ case IrOpcode::kS1x2AllTrue:
+ return MarkAsWord32(node), VisitS1x2AllTrue(node);
case IrOpcode::kS1x4AnyTrue:
return MarkAsWord32(node), VisitS1x4AnyTrue(node);
case IrOpcode::kS1x4AllTrue:
@@ -2489,6 +2552,36 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
+#if !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64
+
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
void InstructionSelector::VisitParameter(Node* node) {
@@ -2962,7 +3055,7 @@ void InstructionSelector::CanonicalizeShuffle(bool inputs_equal,
void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle,
bool* is_swizzle) {
// Get raw shuffle indices.
- memcpy(shuffle, OpParameter<uint8_t*>(node->op()), kSimd128Size);
+ memcpy(shuffle, S8x16ShuffleOf(node->op()), kSimd128Size);
bool needs_swap;
bool inputs_equal = GetVirtualRegister(node->InputAt(0)) ==
GetVirtualRegister(node->InputAt(1));
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 4f6b1c5971..16f88bb516 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -19,6 +19,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -266,7 +269,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
- EnableSwitchJumpTable enable_switch_jump_table,
+ EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
@@ -496,11 +499,15 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
VectorSlotPair const& feedback,
Node* frame_state);
- void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
- void EmitLookupSwitch(const SwitchInfo& sw,
- InstructionOperand& value_operand);
- void EmitBinarySearchSwitch(const SwitchInfo& sw,
- InstructionOperand& value_operand);
+ void EmitTableSwitch(
+ const SwitchInfo& sw,
+ InstructionOperand& index_operand); // NOLINT(runtime/references)
+ void EmitLookupSwitch(
+ const SwitchInfo& sw,
+ InstructionOperand& value_operand); // NOLINT(runtime/references)
+ void EmitBinarySearchSwitch(
+ const SwitchInfo& sw,
+ InstructionOperand& value_operand); // NOLINT(runtime/references)
void TryRename(InstructionOperand* op);
int GetRename(int virtual_register);
@@ -604,6 +611,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
+ // Visit the load node with a value and opcode to replace with.
+ void VisitLoad(Node* node, Node* value, InstructionCode opcode);
void VisitFinishRegion(Node* node);
void VisitParameter(Node* node);
void VisitIfException(Node* node);
@@ -772,6 +781,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
bool instruction_selection_failed_;
ZoneVector<std::pair<int, int>> instr_origins_;
EnableTraceTurboJson trace_turbo_;
+ TickCounter* const tick_counter_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index c52dca61a1..09c7fe22c5 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -530,7 +530,7 @@ Constant::Constant(RelocatablePtrConstantInfo info) {
}
Handle<HeapObject> Constant::ToHeapObject() const {
- DCHECK_EQ(kHeapObject, type());
+ DCHECK(kHeapObject == type() || kCompressedHeapObject == type());
Handle<HeapObject> value(
reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
return value;
@@ -561,7 +561,8 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
return os << constant.ToFloat64().value();
case Constant::kExternalReference:
return os << constant.ToExternalReference().address();
- case Constant::kHeapObject:
+ case Constant::kHeapObject: // Fall through.
+ case Constant::kCompressedHeapObject:
return os << Brief(*constant.ToHeapObject());
case Constant::kRpoNumber:
return os << "RPO" << constant.ToRpoNumber().ToInt();
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 61875a1a17..9b32204055 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -1007,6 +1007,7 @@ class V8_EXPORT_PRIVATE Constant final {
kFloat32,
kFloat64,
kExternalReference,
+ kCompressedHeapObject,
kHeapObject,
kRpoNumber,
kDelayedStringConstant
@@ -1018,8 +1019,9 @@ class V8_EXPORT_PRIVATE Constant final {
explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
explicit Constant(ExternalReference ref)
: type_(kExternalReference), value_(bit_cast<intptr_t>(ref.address())) {}
- explicit Constant(Handle<HeapObject> obj)
- : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
+ explicit Constant(Handle<HeapObject> obj, bool is_compressed = false)
+ : type_(is_compressed ? kCompressedHeapObject : kHeapObject),
+ value_(bit_cast<intptr_t>(obj)) {}
explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
explicit Constant(const StringConstantBase* str)
: type_(kDelayedStringConstant), value_(bit_cast<intptr_t>(str)) {}
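With kCompressedHeapObject added to the enum above, the constructor's new is_compressed flag picks the constant kind for a Handle<HeapObject>; the instruction selector passes true only for IrOpcode::kCompressedHeapConstant nodes (see the instruction-selector-impl.h hunk earlier). A small usage sketch, where handle stands for some Handle<HeapObject>:

Constant full(handle);              // type() == kHeapObject
Constant compressed(handle, true);  // type() == kCompressedHeapObject

Either kind satisfies the relaxed DCHECK in Constant::ToHeapObject() shown in instruction.cc above.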
diff --git a/deps/v8/src/compiler/backend/jump-threading.h b/deps/v8/src/compiler/backend/jump-threading.h
index e23dd45359..ce60ebcb2e 100644
--- a/deps/v8/src/compiler/backend/jump-threading.h
+++ b/deps/v8/src/compiler/backend/jump-threading.h
@@ -17,14 +17,17 @@ class V8_EXPORT_PRIVATE JumpThreading {
public:
// Compute the forwarding map of basic blocks to their ultimate destination.
// Returns {true} if there is at least one block that is forwarded.
- static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>& result,
- InstructionSequence* code, bool frame_at_start);
+ static bool ComputeForwarding(
+ Zone* local_zone,
+ ZoneVector<RpoNumber>& result, // NOLINT(runtime/references)
+ InstructionSequence* code, bool frame_at_start);
// Rewrite the instructions to forward jumps and branches.
// May also negate some branches.
- static void ApplyForwarding(Zone* local_zone,
- ZoneVector<RpoNumber>& forwarding,
- InstructionSequence* code);
+ static void ApplyForwarding(
+ Zone* local_zone,
+ ZoneVector<RpoNumber>& forwarding, // NOLINT(runtime/references)
+ InstructionSequence* code);
};
} // namespace compiler
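The NOLINT(runtime/references) comments added here and in the surrounding headers suppress cpplint's runtime/references check, which flags mutable (non-const) reference parameters; the existing signatures are kept and only the lint warning is silenced on that line. A generic illustration, not taken from V8:

void Accumulate(std::vector<int>& out);   // flagged by runtime/references
void Accumulate(std::vector<int>* out);   // pointer form the check prefers
void Accumulate(
    std::vector<int>& out);  // NOLINT(runtime/references) keeps the reference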
diff --git a/deps/v8/src/compiler/backend/live-range-separator.cc b/deps/v8/src/compiler/backend/live-range-separator.cc
index 6ed0416045..0a0aadfad1 100644
--- a/deps/v8/src/compiler/backend/live-range-separator.cc
+++ b/deps/v8/src/compiler/backend/live-range-separator.cc
@@ -9,15 +9,16 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define TRACE(...) \
- do { \
- if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+#define TRACE_COND(cond, ...) \
+ do { \
+ if (cond) PrintF(__VA_ARGS__); \
} while (false)
namespace {
void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
- LifetimePosition first_cut, LifetimePosition last_cut) {
+ LifetimePosition first_cut, LifetimePosition last_cut,
+ bool trace_alloc) {
DCHECK(!range->IsSplinter());
// We can ignore ranges that live solely in deferred blocks.
// If a range ends right at the end of a deferred block, it is marked by
@@ -49,9 +50,10 @@ void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
range->SetSplinter(splinter);
}
Zone* zone = data->allocation_zone();
- TRACE("creating splinter %d for range %d between %d and %d\n",
- range->splinter()->vreg(), range->vreg(), start.ToInstructionIndex(),
- end.ToInstructionIndex());
+ TRACE_COND(trace_alloc,
+ "creating splinter %d for range %d between %d and %d\n",
+ range->splinter()->vreg(), range->vreg(),
+ start.ToInstructionIndex(), end.ToInstructionIndex());
range->Splinter(start, end, zone);
}
}
@@ -102,7 +104,8 @@ void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
current_block->last_instruction_index());
} else {
if (first_cut.IsValid()) {
- CreateSplinter(range, data, first_cut, last_cut);
+ CreateSplinter(range, data, first_cut, last_cut,
+ data->is_trace_alloc());
first_cut = LifetimePosition::Invalid();
last_cut = LifetimePosition::Invalid();
}
@@ -116,7 +119,8 @@ void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
// have to connect blocks anyway, so we can also splinter to the end of the
// block, too.
if (first_cut.IsValid()) {
- CreateSplinter(range, data, first_cut, interval_end);
+ CreateSplinter(range, data, first_cut, interval_end,
+ data->is_trace_alloc());
first_cut = LifetimePosition::Invalid();
last_cut = LifetimePosition::Invalid();
}
@@ -186,7 +190,7 @@ void LiveRangeMerger::Merge() {
}
}
-#undef TRACE
+#undef TRACE_COND
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 1f79386821..5cec4a8a16 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -80,6 +80,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
// Maybe this is not done on ARM because of the constant pool?
@@ -264,8 +265,9 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(
+ bool& predicate, // NOLINT(runtime/references)
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
predicate = true;
@@ -301,9 +303,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
<< "\""; \
UNIMPLEMENTED();
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -662,8 +664,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -778,6 +780,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ LoadAddress(kScratchReg, &return_location);
+ __ sw(kScratchReg,
+ MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -785,6 +794,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -816,22 +827,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == a0);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
@@ -1611,6 +1620,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Usdc1(ft, i.MemoryOperand(), kScratchReg);
break;
}
+ case kMipsSync: {
+ __ sync();
+ break;
+ }
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
@@ -3157,7 +3170,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -3376,8 +3389,14 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(ra, fp);
- __ mov(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Subu(sp, sp, Operand(kSystemPointerSize));
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3387,7 +3406,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3397,12 +3417,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ lw(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Subu(sp, sp, Operand(kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3564,6 +3588,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
break;
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index ba64e59429..44e53ac044 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -134,6 +134,7 @@ namespace compiler {
V(MipsStackClaim) \
V(MipsSeb) \
V(MipsSeh) \
+ V(MipsSync) \
V(MipsS128Zero) \
V(MipsI32x4Splat) \
V(MipsI32x4ExtractLane) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 26a3e808cc..92ab3f9344 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -284,6 +284,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsUsh:
case kMipsUsw:
case kMipsUswc1:
+ case kMipsSync:
case kMipsWord32AtomicPairStore:
case kMipsWord32AtomicPairAdd:
case kMipsWord32AtomicPairSub:
@@ -1352,7 +1353,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchLookupSwitchLatency((instr->InputCount() - 2) / 2);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
return CallLatency() + 1;
case kArchComment:
case kArchDeoptimize:
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 0c7299d451..452e92a174 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -274,9 +274,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
MipsOperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -1775,6 +1775,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
arraysize(temps), temps);
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsSync, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 5cd9bc54eb..f746b52df6 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -82,6 +82,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
case Constant::kFloat64:
return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
// Maybe this is not done on ARM because of the constant pool?
@@ -277,8 +278,9 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(
+ bool& predicate, // NOLINT(runtime/references)
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
predicate = true;
@@ -309,9 +311,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -634,8 +636,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -756,6 +758,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ LoadAddress(kScratchReg, &return_location);
+ __ sd(kScratchReg,
+ MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -763,6 +772,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -794,22 +805,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == a0);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
@@ -1786,6 +1795,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Usdc1(ft, i.MemoryOperand(), kScratchReg);
break;
}
+ case kMips64Sync: {
+ __ sync();
+ break;
+ }
case kMips64Push:
if (instr->InputAt(0)->IsFPRegister()) {
__ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -3304,7 +3317,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -3535,8 +3548,14 @@ void CodeGenerator::AssembleConstructFrame() {
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(ra, fp);
- __ mov(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Dsubu(sp, sp, Operand(kSystemPointerSize));
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3546,7 +3565,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3556,12 +3576,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ ld(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Dsubu(sp, sp, Operand(kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3723,6 +3747,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
break;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 24f01b1af1..e375ee8d07 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -163,6 +163,7 @@ namespace compiler {
V(Mips64StackClaim) \
V(Mips64Seb) \
V(Mips64Seh) \
+ V(Mips64Sync) \
V(Mips64AssertEqual) \
V(Mips64S128Zero) \
V(Mips64I32x4Splat) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 499a3da05a..4dcafe4197 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -318,6 +318,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ush:
case kMips64Usw:
case kMips64Uswc1:
+ case kMips64Sync:
case kMips64Word64AtomicStoreWord8:
case kMips64Word64AtomicStoreWord16:
case kMips64Word64AtomicStoreWord32:
@@ -1263,7 +1264,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchLookupSwitchLatency(instr);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
return CallLatency() + 1;
case kArchDebugBreak:
return 1;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 9768a7da9b..95f11ebed1 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -334,9 +334,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -1946,7 +1946,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
// in those cases. Unfortunately, the solution is not complete because
// it might skip cases where Word32 full compare is needed, so
// basically it is a hack.
+ // When calling a host function in the simulator, if the function returns
+ // an int32 value, the simulator does not sign-extend it to int64, because
+ // the simulator cannot tell whether the function returns an int32 or an
+ // int64, so we need to do a full word32 compare in this case.
+#ifndef USE_SIMULATOR
if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
+#else
+ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) ||
+ node->InputAt(0)->opcode() == IrOpcode::kCall ||
+ node->InputAt(1)->opcode() == IrOpcode::kCall) {
+#endif
VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
} else {
VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
@@ -2398,6 +2408,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Sync, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 30605df270..5289812cb5 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -79,6 +79,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case Constant::kDelayedStringConstant:
return Operand::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
@@ -262,8 +263,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- PPCOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, Instruction* instr,
+ PPCOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
if (access_mode == kMemoryAccessPoisoned) {
@@ -877,8 +879,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1019,6 +1021,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ constexpr int offset = 12;
+ if (isWasmCapiFunction) {
+ __ mflr(kScratchReg);
+ __ bind(&start_call);
+ __ LoadPC(r0);
+ __ addi(r0, r0, Operand(offset));
+ __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mtlr(r0);
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -1026,6 +1040,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ // TODO(miladfar): In the above block, r0 must be populated with the
+ // strictly-correct PC, which is the return address at this spot. The
+ // offset is set to 12 right now, which is counted from where we are
+ // binding to the label and ends at this spot. If this check fails, replace
+ // it with the correct offset suggested. More info on f5ab7d3.
+ if (isWasmCapiFunction)
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1060,22 +1083,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == r4);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchNop:
case kArchThrowTerminator:
@@ -1174,6 +1195,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kPPC_Sync: {
+ __ sync();
+ break;
+ }
case kPPC_And:
if (HasRegisterInput(instr, 1)) {
__ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -2150,7 +2175,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -2304,14 +2329,20 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ mflr(r0);
- if (FLAG_enable_embedded_constant_pool) {
- __ Push(r0, fp, kConstantPoolRegister);
- // Adjust FP to point to saved FP.
- __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ addi(sp, sp, Operand(-kSystemPointerSize));
} else {
- __ Push(r0, fp);
- __ mr(fp, sp);
+ __ mflr(r0);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ Push(r0, fp, kConstantPoolRegister);
+ // Adjust FP to point to saved FP.
+ __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ } else {
+ __ Push(r0, fp);
+ __ mr(fp, sp);
+ }
}
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
@@ -2325,7 +2356,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(type);
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -2335,12 +2367,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ LoadP(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ addi(sp, sp, Operand(-kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -2389,7 +2425,7 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ __ stop();
}
__ bind(&done);
@@ -2554,6 +2590,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
break;
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index a34a09b796..f37529bd88 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -13,6 +13,7 @@ namespace compiler {
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(PPC_Peek) \
+ V(PPC_Sync) \
V(PPC_And) \
V(PPC_AndComplement) \
V(PPC_Or) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index e5f7d7e45a..61c2d2be3b 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -143,6 +143,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_Push:
case kPPC_PushFrame:
case kPPC_StoreToStackSlot:
+ case kPPC_Sync:
return kHasSideEffect;
case kPPC_AtomicStoreUint8:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index bb503763c2..bfc77b9412 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -173,9 +173,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
PPCOperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -1853,6 +1853,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Sync, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); }
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 57ea2c1a26..44701f8159 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -9,6 +9,7 @@
#include "src/base/adapters.h"
#include "src/base/small-vector.h"
#include "src/codegen/assembler-inl.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/linkage.h"
#include "src/strings/string-stream.h"
#include "src/utils/vector.h"
@@ -17,11 +18,13 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define TRACE(...) \
- do { \
- if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+#define TRACE_COND(cond, ...) \
+ do { \
+ if (cond) PrintF(__VA_ARGS__); \
} while (false)
+#define TRACE(...) TRACE_COND(data()->is_trace_alloc(), __VA_ARGS__)
+
namespace {
static constexpr int kFloat32Bit =
@@ -1119,8 +1122,9 @@ void TopLevelLiveRange::Verify() const {
}
}
-void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
- TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
+void TopLevelLiveRange::ShortenTo(LifetimePosition start, bool trace_alloc) {
+ TRACE_COND(trace_alloc, "Shorten live range %d to [%d\n", vreg(),
+ start.value());
DCHECK_NOT_NULL(first_interval_);
DCHECK(first_interval_->start() <= start);
DCHECK(start < first_interval_->end());
@@ -1128,9 +1132,10 @@ void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
}
void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
- LifetimePosition end, Zone* zone) {
- TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
- end.value());
+ LifetimePosition end, Zone* zone,
+ bool trace_alloc) {
+ TRACE_COND(trace_alloc, "Ensure live range %d in interval [%d %d[\n", vreg(),
+ start.value(), end.value());
LifetimePosition new_end = end;
while (first_interval_ != nullptr && first_interval_->start() <= end) {
if (first_interval_->end() > end) {
@@ -1148,9 +1153,10 @@ void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
}
void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
- LifetimePosition end, Zone* zone) {
- TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
- end.value());
+ LifetimePosition end, Zone* zone,
+ bool trace_alloc) {
+ TRACE_COND(trace_alloc, "Add to live range %d interval [%d %d[\n", vreg(),
+ start.value(), end.value());
if (first_interval_ == nullptr) {
UseInterval* interval = new (zone) UseInterval(start, end);
first_interval_ = interval;
@@ -1173,9 +1179,10 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
}
}
-void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
+void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos, bool trace_alloc) {
LifetimePosition pos = use_pos->pos();
- TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
+ TRACE_COND(trace_alloc, "Add to live range %d use position %d\n", vreg(),
+ pos.value());
UsePosition* prev_hint = nullptr;
UsePosition* prev = nullptr;
UsePosition* current = first_pos_;
@@ -1309,13 +1316,8 @@ void LinearScanAllocator::PrintRangeRow(std::ostream& os,
if (range->spilled()) {
prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string);
} else {
- const char* reg_name;
- if (range->assigned_register() == kUnassignedRegister) {
- reg_name = "???";
- } else {
- reg_name = RegisterName(range->assigned_register());
- }
- prefix = snprintf(buffer, max_prefix_length, "|%s", reg_name);
+ prefix = snprintf(buffer, max_prefix_length, "|%s",
+ RegisterName(range->assigned_register()));
}
os << buffer;
position += std::min(prefix, max_prefix_length - 1);
@@ -1469,7 +1471,7 @@ void RegisterAllocationData::PhiMapValue::CommitAssignment(
RegisterAllocationData::RegisterAllocationData(
const RegisterConfiguration* config, Zone* zone, Frame* frame,
InstructionSequence* code, RegisterAllocationFlags flags,
- const char* debug_name)
+ TickCounter* tick_counter, const char* debug_name)
: allocation_zone_(zone),
frame_(frame),
code_(code),
@@ -1496,7 +1498,8 @@ RegisterAllocationData::RegisterAllocationData(
preassigned_slot_ranges_(zone),
spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone),
zone),
- flags_(flags) {
+ flags_(flags),
+ tick_counter_(tick_counter) {
if (!kSimpleFPAliasing) {
fixed_float_live_ranges_.resize(
kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
@@ -1815,6 +1818,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
void ConstraintBuilder::MeetRegisterConstraints() {
for (InstructionBlock* block : code()->instruction_blocks()) {
+ data_->tick_counter()->DoTick();
MeetRegisterConstraints(block);
}
}
@@ -1973,14 +1977,6 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
second->reference_map(), &gap_move->source()};
data()->delayed_references().push_back(delayed_reference);
}
- } else if (!code()->IsReference(input_vreg) &&
- code()->IsReference(output_vreg)) {
- // The input is assumed to immediately have a tagged representation,
- // before the pointer map can be used. I.e. the pointer map at the
- // instruction will include the output operand (whose value at the
- // beginning of the instruction is equal to the input operand). If
- // this is not desired, then the pointer map at this instruction needs
- // to be adjusted manually.
}
}
}
@@ -1988,6 +1984,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
void ConstraintBuilder::ResolvePhis() {
// Process the blocks in reverse order.
for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
+ data_->tick_counter()->DoTick();
ResolvePhis(block);
}
}
@@ -2071,7 +2068,8 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
while (!iterator.Done()) {
int operand_index = iterator.Current();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
- range->AddUseInterval(start, end, allocation_zone());
+ range->AddUseInterval(start, end, allocation_zone(),
+ data()->is_trace_alloc());
iterator.Advance();
}
}
@@ -2192,16 +2190,18 @@ UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
if (range->IsEmpty() || range->Start() > position) {
// Can happen if there is a definition without use.
- range->AddUseInterval(position, position.NextStart(), allocation_zone());
- range->AddUsePosition(NewUsePosition(position.NextStart()));
+ range->AddUseInterval(position, position.NextStart(), allocation_zone(),
+ data()->is_trace_alloc());
+ range->AddUsePosition(NewUsePosition(position.NextStart()),
+ data()->is_trace_alloc());
} else {
- range->ShortenTo(position);
+ range->ShortenTo(position, data()->is_trace_alloc());
}
if (!operand->IsUnallocated()) return nullptr;
UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
UsePosition* use_pos =
NewUsePosition(position, unalloc_operand, hint, hint_type);
- range->AddUsePosition(use_pos);
+ range->AddUsePosition(use_pos, data()->is_trace_alloc());
return use_pos;
}
@@ -2216,9 +2216,10 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
if (operand->IsUnallocated()) {
UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
- range->AddUsePosition(use_pos);
+ range->AddUsePosition(use_pos, data()->is_trace_alloc());
}
- range->AddUseInterval(block_start, position, allocation_zone());
+ range->AddUseInterval(block_start, position, allocation_zone(),
+ data()->is_trace_alloc());
return use_pos;
}
@@ -2279,7 +2280,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int code = config()->GetAllocatableGeneralCode(i);
TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
}
@@ -2291,7 +2292,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kFloat64, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
// Clobber fixed float registers on archs with non-simple aliasing.
if (!kSimpleFPAliasing) {
@@ -2304,7 +2305,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kFloat32, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
}
if (fixed_simd128_live_ranges) {
@@ -2314,7 +2315,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kSimd128, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
}
}
@@ -2574,7 +2575,8 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
while (!iterator.Done()) {
int operand_index = iterator.Current();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
- range->EnsureInterval(start, end, allocation_zone());
+ range->EnsureInterval(start, end, allocation_zone(),
+ data()->is_trace_alloc());
iterator.Advance();
}
// Insert all values into the live in sets of all blocks in the loop.
@@ -2588,6 +2590,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
+ data_->tick_counter()->DoTick();
InstructionBlock* block =
code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
BitVector* live = ComputeLiveOut(block, data());
@@ -2607,6 +2610,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Postprocess the ranges.
const size_t live_ranges_size = data()->live_ranges().size();
for (TopLevelLiveRange* range : data()->live_ranges()) {
+ data_->tick_counter()->DoTick();
CHECK_EQ(live_ranges_size,
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
if (range == nullptr) continue;
@@ -2773,7 +2777,7 @@ void BundleBuilder::BuildBundles() {
LiveRangeBundle* input_bundle = input_range->get_bundle();
if (input_bundle != nullptr) {
TRACE("Merge\n");
- if (out->TryMerge(input_bundle))
+ if (out->TryMerge(input_bundle, data()->is_trace_alloc()))
TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
out->id());
} else {
@@ -2798,7 +2802,7 @@ bool LiveRangeBundle::TryAddRange(LiveRange* range) {
InsertUses(range->first_interval());
return true;
}
-bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) {
+bool LiveRangeBundle::TryMerge(LiveRangeBundle* other, bool trace_alloc) {
if (other == this) return true;
auto iter1 = uses_.begin();
@@ -2810,8 +2814,8 @@ bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) {
} else if (iter2->start > iter1->end) {
++iter1;
} else {
- TRACE("No merge %d:%d %d:%d\n", iter1->start, iter1->end, iter2->start,
- iter2->end);
+ TRACE_COND(trace_alloc, "No merge %d:%d %d:%d\n", iter1->start,
+ iter1->end, iter2->start, iter2->end);
return false;
}
}
@@ -3042,6 +3046,7 @@ void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
}
const char* RegisterAllocator::RegisterName(int register_code) const {
+ if (register_code == kUnassignedRegister) return "unassigned";
return mode() == GENERAL_REGISTERS
? i::RegisterName(Register::from_code(register_code))
: i::RegisterName(DoubleRegister::from_code(register_code));
@@ -3408,7 +3413,7 @@ void LinearScanAllocator::ComputeStateFromManyPredecessors(
to_be_live->emplace(val.first, reg);
TRACE("Reset %d as live due vote %zu in %s\n",
val.first->TopLevel()->vreg(), val.second.count,
- reg == kUnassignedRegister ? "unassigned" : RegisterName(reg));
+ RegisterName(reg));
}
}
};
@@ -3477,6 +3482,8 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
RegisterName(other->assigned_register()));
LiveRange* split_off =
other->SplitAt(next_start, data()->allocation_zone());
+ // Try to get the same register after the deferred block.
+ split_off->set_controlflow_hint(other->assigned_register());
DCHECK_NE(split_off, other);
AddToUnhandled(split_off);
update_caches(other);
@@ -3574,7 +3581,7 @@ void LinearScanAllocator::AllocateRegisters() {
SplitAndSpillRangesDefinedByMemoryOperand();
data()->ResetSpillState();
- if (FLAG_trace_alloc) {
+ if (data()->is_trace_alloc()) {
PrintRangeOverview(std::cout);
}
@@ -3642,6 +3649,7 @@ void LinearScanAllocator::AllocateRegisters() {
while (!unhandled_live_ranges().empty() ||
(data()->is_turbo_control_flow_aware_allocation() &&
last_block < max_blocks)) {
+ data()->tick_counter()->DoTick();
LiveRange* current = unhandled_live_ranges().empty()
? nullptr
: *unhandled_live_ranges().begin();
@@ -3824,7 +3832,7 @@ void LinearScanAllocator::AllocateRegisters() {
ProcessCurrentRange(current, spill_mode);
}
- if (FLAG_trace_alloc) {
+ if (data()->is_trace_alloc()) {
PrintRangeOverview(std::cout);
}
}
@@ -4557,6 +4565,14 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
LiveRange* third_part =
SplitBetween(second_part, split_start, third_part_end);
+ if (GetInstructionBlock(data()->code(), second_part->Start())
+ ->IsDeferred()) {
+ // Try to use the same register as before.
+ TRACE("Setting control flow hint for %d:%d to %s\n",
+ third_part->TopLevel()->vreg(), third_part->relative_id(),
+ RegisterName(range->controlflow_hint()));
+ third_part->set_controlflow_hint(range->controlflow_hint());
+ }
AddToUnhandled(third_part);
// This can happen, even if we checked for start < end above, as we fiddle
@@ -4601,6 +4617,7 @@ OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
void OperandAssigner::DecideSpillingMode() {
if (data()->is_turbo_control_flow_aware_allocation()) {
for (auto range : data()->live_ranges()) {
+ data()->tick_counter()->DoTick();
int max_blocks = data()->code()->InstructionBlockCount();
if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) {
// If the range is spilled only in deferred blocks and starts in
@@ -4629,6 +4646,7 @@ void OperandAssigner::DecideSpillingMode() {
void OperandAssigner::AssignSpillSlots() {
for (auto range : data()->live_ranges()) {
+ data()->tick_counter()->DoTick();
if (range != nullptr && range->get_bundle() != nullptr) {
range->get_bundle()->MergeSpillRanges();
}
@@ -4636,6 +4654,7 @@ void OperandAssigner::AssignSpillSlots() {
ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
// Merge disjoint spill ranges
for (size_t i = 0; i < spill_ranges.size(); ++i) {
+ data()->tick_counter()->DoTick();
SpillRange* range = spill_ranges[i];
if (range == nullptr) continue;
if (range->IsEmpty()) continue;
@@ -4648,6 +4667,7 @@ void OperandAssigner::AssignSpillSlots() {
}
// Allocate slots for the merged spill ranges.
for (SpillRange* range : spill_ranges) {
+ data()->tick_counter()->DoTick();
if (range == nullptr || range->IsEmpty()) continue;
// Allocate a new operand referring to the spill slot.
if (!range->HasSlot()) {
@@ -4660,6 +4680,7 @@ void OperandAssigner::AssignSpillSlots() {
void OperandAssigner::CommitAssignment() {
const size_t live_ranges_size = data()->live_ranges().size();
for (TopLevelLiveRange* top_range : data()->live_ranges()) {
+ data()->tick_counter()->DoTick();
CHECK_EQ(live_ranges_size,
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
if (top_range == nullptr || top_range->IsEmpty()) continue;
@@ -4859,6 +4880,7 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
BitVector* live = live_in_sets[block->rpo_number().ToInt()];
BitVector::Iterator iterator(live);
while (!iterator.Done()) {
+ data()->tick_counter()->DoTick();
int vreg = iterator.Current();
LiveRangeBoundArray* array = finder.ArrayFor(vreg);
for (const RpoNumber& pred : block->predecessors()) {
@@ -5130,6 +5152,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
}
#undef TRACE
+#undef TRACE_COND
} // namespace compiler
} // namespace internal
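
The hunks above replace compile-time FLAG_trace_alloc checks with a per-allocation is_trace_alloc() flag and route hot-path tracing through a TRACE_COND macro whose definition sits earlier in register-allocator.cc and is not part of this diff. A minimal sketch of what such a conditional trace macro typically looks like, assuming a PrintF-style sink (illustrative only, not the exact upstream definition):

    #define TRACE_COND(cond, ...)       \
      do {                              \
        if (cond) PrintF(__VA_ARGS__);  \
      } while (false)

    // Usage mirrors the call sites above:
    // TRACE_COND(data()->is_trace_alloc(), "No merge %d:%d %d:%d\n", ...);
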
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index 8929fb2ee6..55f8a8dd1f 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -16,6 +16,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
static const int32_t kUnassignedRegister = RegisterConfiguration::kMaxRegisters;
@@ -175,7 +178,8 @@ std::ostream& operator<<(std::ostream& os, const LifetimePosition pos);
enum class RegisterAllocationFlag : unsigned {
kTurboControlFlowAwareAllocation = 1 << 0,
- kTurboPreprocessRanges = 1 << 1
+ kTurboPreprocessRanges = 1 << 1,
+ kTraceAllocation = 1 << 2
};
using RegisterAllocationFlags = base::Flags<RegisterAllocationFlag>;
@@ -198,6 +202,10 @@ class RegisterAllocationData final : public ZoneObject {
return flags_ & RegisterAllocationFlag::kTurboPreprocessRanges;
}
+ bool is_trace_alloc() {
+ return flags_ & RegisterAllocationFlag::kTraceAllocation;
+ }
+
static constexpr int kNumberOfFixedRangesPerRegister = 2;
class PhiMapValue : public ZoneObject {
@@ -238,6 +246,7 @@ class RegisterAllocationData final : public ZoneObject {
Zone* allocation_zone, Frame* frame,
InstructionSequence* code,
RegisterAllocationFlags flags,
+ TickCounter* tick_counter,
const char* debug_name = nullptr);
const ZoneVector<TopLevelLiveRange*>& live_ranges() const {
@@ -328,6 +337,8 @@ class RegisterAllocationData final : public ZoneObject {
void ResetSpillState() { spill_state_.clear(); }
+ TickCounter* tick_counter() { return tick_counter_; }
+
private:
int GetNextLiveRangeId();
@@ -354,6 +365,7 @@ class RegisterAllocationData final : public ZoneObject {
RangesWithPreassignedSlots preassigned_slot_ranges_;
ZoneVector<ZoneVector<LiveRange*>> spill_state_;
RegisterAllocationFlags flags_;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
};
@@ -741,7 +753,7 @@ class LiveRangeBundle : public ZoneObject {
: ranges_(zone), uses_(zone), id_(id) {}
bool TryAddRange(LiveRange* range);
- bool TryMerge(LiveRangeBundle* other);
+ bool TryMerge(LiveRangeBundle* other, bool trace_alloc);
ZoneSet<LiveRange*, LiveRangeOrdering> ranges_;
ZoneSet<Range, RangeOrdering> uses_;
@@ -785,12 +797,14 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
SlotUseKind slot_use_kind() const { return HasSlotUseField::decode(bits_); }
// Add a new interval or a new use position to this live range.
- void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
- void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
- void AddUsePosition(UsePosition* pos);
+ void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone,
+ bool trace_alloc);
+ void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone,
+ bool trace_alloc);
+ void AddUsePosition(UsePosition* pos, bool trace_alloc);
// Shorten the most recently added interval by setting a new start.
- void ShortenTo(LifetimePosition start);
+ void ShortenTo(LifetimePosition start, bool trace_alloc);
// Detaches between start and end, and attributes the resulting range to
// result.
@@ -1279,11 +1293,13 @@ class LinearScanAllocator final : public RegisterAllocator {
RangeWithRegister::Equals>;
void MaybeUndoPreviousSplit(LiveRange* range);
- void SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
- LifetimePosition position, SpillMode spill_mode);
+ void SpillNotLiveRanges(
+ RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references)
+ LifetimePosition position, SpillMode spill_mode);
LiveRange* AssignRegisterOnReload(LiveRange* range, int reg);
- void ReloadLiveRanges(RangeWithRegisterSet& to_be_live,
- LifetimePosition position);
+ void ReloadLiveRanges(
+ RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references)
+ LifetimePosition position);
void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block);
bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
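
RegisterAllocationData now carries a TickCounter* and the allocator phases call DoTick() once per block, live range, or spill range processed, presumably so long-running allocation work can be observed from outside the phase. The real counter lives elsewhere in the tree; a hypothetical minimal counter of this shape illustrates the pattern:

    // Hypothetical sketch; the actual TickCounter in the V8 tree may differ.
    class TickCounter {
     public:
      void DoTick() { ++ticks_; }            // one unit of compiler work
      size_t ticks() const { return ticks_; }

     private:
      size_t ticks_ = 0;
    };
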
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 595800268d..6457b7c8b4 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -73,6 +73,7 @@ class S390OperandConverter final : public InstructionOperandConverter {
case Constant::kDelayedStringConstant:
return Operand::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
@@ -1245,8 +1246,9 @@ void AdjustStackPointerForTailCall(
}
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- S390OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, Instruction* instr,
+ S390OperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
if (access_mode == kMemoryAccessPoisoned) {
@@ -1380,8 +1382,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1509,6 +1511,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ // Put the return address in a stack slot.
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ larl(r0, &return_location);
+ __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -1516,6 +1525,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1547,22 +1558,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == r3);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchNop:
case kArchThrowTerminator:
@@ -2891,7 +2900,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -3014,8 +3023,14 @@ void CodeGenerator::AssembleConstructFrame() {
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(r14, fp);
- __ LoadRR(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ } else {
+ __ Push(r14, fp);
+ __ LoadRR(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue(ip);
if (call_descriptor->PushArgumentCount()) {
@@ -3028,7 +3043,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(type);
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3038,12 +3054,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ LoadP(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -3089,7 +3109,7 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ __ stop();
}
__ bind(&done);
@@ -3247,6 +3267,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
+ break;
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
break;
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index d982605efc..99d3b0fa0f 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -447,11 +447,13 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
#endif
template <class CanCombineWithLoad>
-void GenerateRightOperands(InstructionSelector* selector, Node* node,
- Node* right, InstructionCode& opcode,
- OperandModes& operand_mode,
- InstructionOperand* inputs, size_t& input_count,
- CanCombineWithLoad canCombineWithLoad) {
+void GenerateRightOperands(
+ InstructionSelector* selector, Node* node, Node* right,
+ InstructionCode& opcode, // NOLINT(runtime/references)
+ OperandModes& operand_mode, // NOLINT(runtime/references)
+ InstructionOperand* inputs,
+ size_t& input_count, // NOLINT(runtime/references)
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
if ((operand_mode & OperandMode::kAllowImmediate) &&
@@ -491,11 +493,13 @@ void GenerateRightOperands(InstructionSelector* selector, Node* node,
}
template <class CanCombineWithLoad>
-void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
- Node* left, Node* right, InstructionCode& opcode,
- OperandModes& operand_mode,
- InstructionOperand* inputs, size_t& input_count,
- CanCombineWithLoad canCombineWithLoad) {
+void GenerateBinOpOperands(
+ InstructionSelector* selector, Node* node, Node* left, Node* right,
+ InstructionCode& opcode, // NOLINT(runtime/references)
+ OperandModes& operand_mode, // NOLINT(runtime/references)
+ InstructionOperand* inputs,
+ size_t& input_count, // NOLINT(runtime/references)
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
// left is always register
InstructionOperand const left_input = g.UseRegister(left);
@@ -686,9 +690,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
S390OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -2194,6 +2198,11 @@ void InstructionSelector::EmitPrepareArguments(
}
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kArchNop, g.NoOutput());
+}
+
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
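
Several helpers in these hunks take non-const reference parameters (InstructionCode&, OperandModes&, size_t&), which cpplint's runtime/references check flags; the reformatting keeps the signatures and silences the warning with NOLINT comments rather than switching to pointers. A small illustration of the two styles (names here are invented for the example):

    // Flagged by cpplint (runtime/references): output through a non-const
    // reference is invisible at the call site.
    void Twice(int& out) { out *= 2; }  // NOLINT(runtime/references)

    // Style-guide alternative: take a pointer, so Twice(&x) makes the
    // mutation explicit where it happens.
    void Twice(int* out) { *out *= 2; }
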
diff --git a/deps/v8/src/compiler/backend/unwinding-info-writer.h b/deps/v8/src/compiler/backend/unwinding-info-writer.h
index 590a839a06..d3a52b34b7 100644
--- a/deps/v8/src/compiler/backend/unwinding-info-writer.h
+++ b/deps/v8/src/compiler/backend/unwinding-info-writer.h
@@ -23,6 +23,7 @@ namespace v8 {
namespace internal {
class EhFrameWriter;
+class Zone;
namespace compiler {
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index c6667292fc..a108edeff0 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -349,7 +349,8 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
- X64OperandConverter& i, int pc) {
+ X64OperandConverter& i, // NOLINT(runtime/references)
+ int pc) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessProtected) {
@@ -357,9 +358,9 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- X64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ X64OperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -575,6 +576,19 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \
} while (false)
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
+ do { \
+ CpuFeatureScope sse_scope(tasm(), SSE4_1); \
+ Register dst = i.OutputRegister(); \
+ Register tmp = i.TempRegister(0); \
+ __ movq(tmp, Immediate(1)); \
+ __ xorq(dst, dst); \
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg); \
+ __ opcode(kScratchDoubleReg, i.InputSimd128Register(0)); \
+ __ ptest(kScratchDoubleReg, kScratchDoubleReg); \
+ __ cmovq(zero, dst, tmp); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
__ movq(rsp, rbp);
@@ -752,8 +766,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!HasImmediateInput(instr, 0));
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -952,17 +966,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == rdx);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
__ int3();
unwinding_info_writer_.MarkBlockWillExit();
@@ -1029,9 +1041,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
OutOfLineRecordWrite(this, object, operand, value, scratch0, scratch1,
mode, DetermineStubCallMode());
__ StoreTaggedField(operand, value);
- if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(object, object);
- }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
@@ -1042,7 +1051,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
__ andq(i.InputRegister(0), kSpeculationPoisonRegister);
break;
- case kLFence:
+ case kX64MFence:
+ __ mfence();
+ break;
+ case kX64LFence:
__ lfence();
break;
case kArchStackSlot: {
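
The kLFence opcode is split into kX64LFence and a new kX64MFence; mfence is a full barrier ordering both loads and stores, whereas lfence serializes loads only, and kX64MFence is what the new VisitMemoryBarrier lowering emits further down in instruction-selector-x64.cc. The closest portable C++ analogue of that full barrier is a sequentially consistent fence; a rough sketch for intuition only:

    #include <atomic>

    void Publish(int* slot, std::atomic<bool>* ready) {
      *slot = 42;
      // Roughly the ordering an x64 mfence provides between the store
      // above and the store below.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      ready->store(true, std::memory_order_relaxed);
    }
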
@@ -1309,16 +1321,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kSSEFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
- __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, 33);
+ __ Andps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
- __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, 31);
+ __ Xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat32Sqrt:
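
These abs/neg sequences synthesize the IEEE-754 sign-bit masks in a register (all-ones via pcmpeqd, then a shift) instead of loading a 128-bit constant: Float32Abs clears bit 31 of each lane, Float32Neg flips it. The same identities in scalar form, for reference:

    #include <cstdint>
    #include <cstring>

    static float Float32Abs(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits &= 0x7fffffffu;  // clear the sign bit
      std::memcpy(&x, &bits, sizeof x);
      return x;
    }

    static float Float32Neg(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits ^= 0x80000000u;  // flip the sign bit
      std::memcpy(&x, &bits, sizeof x);
      return x;
    }
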
@@ -1517,18 +1529,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
+ case kX64F64x2Abs:
case kSSEFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
- __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, 1);
+ __ Andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
+ case kX64F64x2Neg:
case kSSEFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
- __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, 63);
+ __ Xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat64Sqrt:
@@ -1944,16 +1958,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64MovqDecompressTaggedSigned: {
CHECK(instr->HasOutput());
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqDecompressTaggedPointer: {
CHECK(instr->HasOutput());
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqDecompressAnyTagged: {
CHECK(instr->HasOutput());
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqCompressTagged: {
@@ -1970,16 +1987,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64DecompressSigned: {
CHECK(instr->HasOutput());
ASSEMBLE_MOVX(DecompressTaggedSigned);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64DecompressPointer: {
CHECK(instr->HasOutput());
ASSEMBLE_MOVX(DecompressTaggedPointer);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64DecompressAny: {
CHECK(instr->HasOutput());
ASSEMBLE_MOVX(DecompressAnyTagged);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64CompressSigned: // Fall through.
@@ -2006,11 +2026,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
- __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ Movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movss(operand, i.InputDoubleRegister(index));
+ __ Movss(operand, i.InputDoubleRegister(index));
}
break;
case kX64Movsd: {
@@ -2039,11 +2059,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope(tasm(), SSSE3);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
- __ movdqu(i.OutputSimd128Register(), i.MemoryOperand());
+ __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movdqu(operand, i.InputSimd128Register(index));
+ __ Movdqu(operand, i.InputSimd128Register(index));
}
break;
}
@@ -2065,7 +2085,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsRegister()) {
__ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Movss(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kX64BitcastLD:
@@ -2235,6 +2255,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64F64x2Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ pshufd(dst, i.InputDoubleRegister(0), 0x44);
+ } else {
+ __ pshufd(dst, i.InputOperand(0), 0x44);
+ }
+ break;
+ }
+ case kX64F64x2ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ if (instr->InputAt(2)->IsFPRegister()) {
+ __ movq(kScratchRegister, i.InputDoubleRegister(2));
+ __ pinsrq(i.OutputSimd128Register(), kScratchRegister, i.InputInt8(1));
+ } else {
+ __ pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64F64x2ExtractLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pextrq(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movq(i.OutputDoubleRegister(), kScratchRegister);
+ break;
+ }
+ case kX64F64x2Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Lt: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Le: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
// TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
case kX64F32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
@@ -2400,6 +2465,171 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I64x2Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(dst, i.InputRegister(0));
+ } else {
+ __ movq(dst, i.InputOperand(0));
+ }
+ __ pshufd(dst, dst, 0x44);
+ break;
+ }
+ case kX64I64x2ExtractLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
+ break;
+ }
+ case kX64I64x2ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ if (instr->InputAt(2)->IsRegister()) {
+ __ pinsrq(i.OutputSimd128Register(), i.InputRegister(2),
+ i.InputInt8(1));
+ } else {
+ __ pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64I64x2Neg: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ movapd(kScratchDoubleReg, src);
+ src = kScratchDoubleReg;
+ }
+ __ pxor(dst, dst);
+ __ psubq(dst, src);
+ break;
+ }
+ case kX64I64x2Shl: {
+ __ psllq(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I64x2ShrS: {
+        // TODO(zhin): there is vpsraq but it requires AVX512
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ // ShrS on each quadword one at a time
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+
+ // lower quadword
+ __ pextrq(kScratchRegister, src, 0x0);
+ __ sarq(kScratchRegister, Immediate(i.InputInt8(1)));
+ __ pinsrq(dst, kScratchRegister, 0x0);
+
+ // upper quadword
+ __ pextrq(kScratchRegister, src, 0x1);
+ __ sarq(kScratchRegister, Immediate(i.InputInt8(1)));
+ __ pinsrq(dst, kScratchRegister, 0x1);
+ break;
+ }
+ case kX64I64x2Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ paddq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psubq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister left = i.InputSimd128Register(0);
+ XMMRegister right = i.InputSimd128Register(1);
+ XMMRegister tmp1 = i.ToSimd128Register(instr->TempAt(0));
+ XMMRegister tmp2 = i.ToSimd128Register(instr->TempAt(1));
+
+ __ movaps(tmp1, left);
+ __ movaps(tmp2, right);
+
+ // Multiply high dword of each qword of left with right.
+ __ psrlq(tmp1, 32);
+ __ pmuludq(tmp1, right);
+
+ // Multiply high dword of each qword of right with left.
+ __ psrlq(tmp2, 32);
+ __ pmuludq(tmp2, left);
+
+ __ paddq(tmp2, tmp1);
+ __ psllq(tmp2, 32);
+
+ __ pmuludq(left, right);
+ __ paddq(left, tmp2); // left == dst
+ break;
+ }
+ case kX64I64x2Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ pcmpeqq(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kX64I64x2GtS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ __ pcmpgtq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2GeS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ __ movaps(tmp, src);
+ __ pcmpgtq(tmp, dst);
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, tmp);
+ break;
+ }
+ case kX64I64x2ShrU: {
+ __ psrlq(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I64x2GtU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+
+ __ movaps(tmp, src);
+ __ pxor(tmp, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ __ pcmpgtq(dst, tmp);
+ break;
+ }
+ case kX64I64x2GeU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+
+ __ movaps(tmp, src);
+ __ pxor(dst, kScratchDoubleReg);
+ __ pxor(tmp, kScratchDoubleReg);
+ __ pcmpgtq(tmp, dst);
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, tmp);
+ break;
+ }
case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (instr->InputAt(0)->IsRegister()) {
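
Two of the new I64x2 sequences lean on classic integer identities: kX64I64x2Mul assembles a per-lane 64x64->64 multiply out of 32-bit pmuludq products, and kX64I64x2GtU/GeU turn an unsigned comparison into the signed pcmpgtq by flipping the sign bit of both operands. A scalar sanity-check sketch of both identities:

    #include <cstdint>

    // Low 64 bits of a*b using only 32x32->64 multiplies, the shape of the
    // psrlq/pmuludq/psllq/paddq sequence above.
    static uint64_t MulLow64(uint64_t a, uint64_t b) {
      uint64_t a_lo = a & 0xffffffffu, a_hi = a >> 32;
      uint64_t b_lo = b & 0xffffffffu, b_hi = b >> 32;
      uint64_t cross = a_hi * b_lo + a_lo * b_hi;  // only its low 32 bits survive
      return a_lo * b_lo + (cross << 32);
    }

    // Unsigned > via a signed compare after xor-ing the sign bit into both
    // operands, mirroring the pxor-with-0x8000... then pcmpgtq sequence.
    static bool UnsignedGreater(uint64_t a, uint64_t b) {
      const uint64_t kSignBit = uint64_t{1} << 63;
      return static_cast<int64_t>(a ^ kSignBit) >
             static_cast<int64_t>(b ^ kSignBit);
    }
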
@@ -3297,6 +3527,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ por(dst, kScratchDoubleReg);
break;
}
+ case kX64S1x2AnyTrue:
case kX64S1x4AnyTrue:
case kX64S1x8AnyTrue:
case kX64S1x16AnyTrue: {
@@ -3310,19 +3541,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmovq(zero, dst, tmp);
break;
}
- case kX64S1x4AllTrue:
- case kX64S1x8AllTrue:
+    // Need to split up all the different lane structures because the width of
+    // the comparison instruction matters, e.g. comparing 0xff00 against zero
+    // with pcmpeqb yields 0x00ff while pcmpeqw yields 0x0000, so ptest then
+    // sets ZF to 0 and 1 respectively.
+ case kX64S1x2AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqq);
+ break;
+ }
+ case kX64S1x4AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqd);
+ break;
+ }
+ case kX64S1x8AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
+ break;
+ }
case kX64S1x16AllTrue: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- Register dst = i.OutputRegister();
- XMMRegister src = i.InputSimd128Register(0);
- Register tmp = i.TempRegister(0);
- __ movq(tmp, Immediate(1));
- __ xorq(dst, dst);
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(kScratchDoubleReg, src);
- __ ptest(kScratchDoubleReg, kScratchDoubleReg);
- __ cmovq(zero, dst, tmp);
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
case kX64StackCheck:
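
ASSEMBLE_SIMD_ALL_TRUE computes "every lane is non-zero": it compares the input against an all-zero register with the lane-width-specific pcmpeq*, then ptest/cmovq make the result 1 exactly when no lane compared equal to zero, which is why each lane shape needs its own compare instruction. The scalar meaning, per lane type:

    #include <cstdint>

    // SIMD all_true over lanes of type T: true iff no lane is zero.
    template <typename T>
    static bool AllTrue(const T* lanes, int lane_count) {
      for (int i = 0; i < lane_count; ++i) {
        if (lanes[i] == 0) return false;
      }
      return true;
    }

    // Width dependence in one example: 0xff00 as a single 16-bit lane is
    // non-zero (all_true), but viewed as two 8-bit lanes its low byte is
    // zero (not all_true).
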
@@ -3507,6 +3743,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_IMM_INSTR
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
+#undef ASSEMBLE_SIMD_ALL_TRUE
namespace {
@@ -3734,6 +3971,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsCFunctionCall()) {
__ pushq(rbp);
__ movq(rbp, rsp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)));
+ // Reserve stack space for saving the c_entry_fp later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3765,8 +4007,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(pc_base);
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3835,7 +4077,7 @@ void CodeGenerator::AssembleConstructFrame() {
int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
if (!((1 << i) & saves_fp)) continue;
- __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
+ __ Movdqu(Operand(rsp, kQuadWordSize * slot_idx),
XMMRegister::from_code(i));
slot_idx++;
}
@@ -3877,7 +4119,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
if (!((1 << i) & saves_fp)) continue;
- __ movdqu(XMMRegister::from_code(i),
+ __ Movdqu(XMMRegister::from_code(i),
Operand(rsp, kQuadWordSize * slot_idx));
slot_idx++;
}
@@ -3970,6 +4212,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
+ }
+ break;
+ }
case Constant::kDelayedStringConstant: {
const StringConstantBase* src_constant = src.ToDelayedStringConstant();
__ MoveStringConstant(dst, src_constant);
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 57ef26dbd7..d6ac3f43df 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -58,7 +58,8 @@ namespace compiler {
V(X64Popcnt32) \
V(X64Bswap) \
V(X64Bswap32) \
- V(LFence) \
+ V(X64MFence) \
+ V(X64LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -158,6 +159,15 @@ namespace compiler {
V(X64Poke) \
V(X64Peek) \
V(X64StackCheck) \
+ V(X64F64x2Splat) \
+ V(X64F64x2ExtractLane) \
+ V(X64F64x2ReplaceLane) \
+ V(X64F64x2Abs) \
+ V(X64F64x2Neg) \
+ V(X64F64x2Eq) \
+ V(X64F64x2Ne) \
+ V(X64F64x2Lt) \
+ V(X64F64x2Le) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -177,6 +187,22 @@ namespace compiler {
V(X64F32x4Ne) \
V(X64F32x4Lt) \
V(X64F32x4Le) \
+ V(X64I64x2Splat) \
+ V(X64I64x2ExtractLane) \
+ V(X64I64x2ReplaceLane) \
+ V(X64I64x2Neg) \
+ V(X64I64x2Shl) \
+ V(X64I64x2ShrS) \
+ V(X64I64x2Add) \
+ V(X64I64x2Sub) \
+ V(X64I64x2Mul) \
+ V(X64I64x2Eq) \
+ V(X64I64x2Ne) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2ShrU) \
+ V(X64I64x2GtU) \
+ V(X64I64x2GeU) \
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4ReplaceLane) \
@@ -293,6 +319,8 @@ namespace compiler {
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
+ V(X64S1x2AnyTrue) \
+ V(X64S1x2AllTrue) \
V(X64S1x4AnyTrue) \
V(X64S1x4AllTrue) \
V(X64S1x8AnyTrue) \
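
The new opcodes are appended to the architecture's X-macro opcode list, so the enum values, their printable names, and the scheduler's switch cases are all generated from the single list and cannot drift apart. A generic sketch of the pattern (names here are invented, not the V8 macros):

    #define MY_OPCODE_LIST(V) \
      V(Add)                  \
      V(Sub)                  \
      V(Mul)

    enum class Opcode {
    #define DECLARE(Name) k##Name,
      MY_OPCODE_LIST(DECLARE)
    #undef DECLARE
    };

    const char* OpcodeName(Opcode op) {
      switch (op) {
    #define CASE(Name)      \
      case Opcode::k##Name: \
        return #Name;
        MY_OPCODE_LIST(CASE)
    #undef CASE
      }
      return "unknown";
    }
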
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 9d48e9175a..6389ef2e50 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -124,6 +124,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
+ case kX64F64x2Splat:
+ case kX64F64x2ExtractLane:
+ case kX64F64x2ReplaceLane:
+ case kX64F64x2Abs:
+ case kX64F64x2Neg:
+ case kX64F64x2Eq:
+ case kX64F64x2Ne:
+ case kX64F64x2Lt:
+ case kX64F64x2Le:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -143,6 +152,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Ne:
case kX64F32x4Lt:
case kX64F32x4Le:
+ case kX64I64x2Splat:
+ case kX64I64x2ExtractLane:
+ case kX64I64x2ReplaceLane:
+ case kX64I64x2Neg:
+ case kX64I64x2Shl:
+ case kX64I64x2ShrS:
+ case kX64I64x2Add:
+ case kX64I64x2Sub:
+ case kX64I64x2Mul:
+ case kX64I64x2Eq:
+ case kX64I64x2Ne:
+ case kX64I64x2GtS:
+ case kX64I64x2GeS:
+ case kX64I64x2ShrU:
+ case kX64I64x2GtU:
+ case kX64I64x2GeU:
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
case kX64I32x4ReplaceLane:
@@ -233,6 +258,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Not:
case kX64S128Select:
case kX64S128Zero:
+ case kX64S1x2AnyTrue:
+ case kX64S1x2AllTrue:
case kX64S1x4AnyTrue:
case kX64S1x4AllTrue:
case kX64S1x8AnyTrue:
@@ -327,7 +354,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Poke:
return kHasSideEffect;
- case kLFence:
+ case kX64MFence:
+ case kX64LFence:
return kHasSideEffect;
case kX64Word64AtomicLoadUint8:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index a20590b8d3..a4908fb846 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -309,21 +309,19 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
X64OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
X64OperandGenerator g(this);
-
- ArchOpcode opcode = GetLoadOpcode(load_rep);
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
code |= MiscField::encode(kMemoryAccessProtected);
@@ -334,6 +332,11 @@ void InstructionSelector::VisitLoad(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
+}
+
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
@@ -898,7 +901,8 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
// Omit truncation and turn subtractions of constant values into immediate
// "leal" instructions by negating the value.
Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), int64_input, g.TempImmediate(-imm));
+ g.DefineAsRegister(node), int64_input,
+ g.TempImmediate(base::NegateWithWraparound(imm)));
}
return;
}
@@ -907,9 +911,9 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
if (m.left().Is(0)) {
Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
} else if (m.right().Is(0)) {
- // TODO(jarin): We should be able to use {EmitIdentity} here
- // (https://crbug.com/v8/7947).
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+ // {EmitIdentity} reuses the virtual register of the first input
+ // for the output. This is exactly what we want here.
+ EmitIdentity(node);
} else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
// Turn subtractions of constant values into immediate "leal" instructions
// by negating the value.
@@ -1254,23 +1258,47 @@ void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
}
void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
- X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressed);
+ VisitLoad(node, value, kX64MovqDecompressAnyTagged);
+ } else {
+ X64OperandGenerator g(this);
+ Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value));
+ }
}
void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
Node* node) {
- X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedPointer);
+ VisitLoad(node, value, kX64MovqDecompressTaggedPointer);
+ } else {
+ X64OperandGenerator g(this);
+ Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value));
+ }
}
void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
Node* node) {
- X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedSigned);
+ VisitLoad(node, value, kX64MovqDecompressTaggedSigned);
+ } else {
+ X64OperandGenerator g(this);
+ Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value));
+ }
}
namespace {
@@ -2343,6 +2371,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64MFence, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -2545,12 +2578,18 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_TYPES(V) \
+ V(F64x2) \
V(F32x4) \
+ V(I64x2) \
V(I32x4) \
V(I16x8) \
V(I8x16)
#define SIMD_BINOP_LIST(V) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
V(F32x4Add) \
V(F32x4AddHoriz) \
V(F32x4Sub) \
@@ -2561,6 +2600,11 @@ VISIT_ATOMIC_BINOP(Xor)
V(F32x4Ne) \
V(F32x4Lt) \
V(F32x4Le) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2615,12 +2659,18 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Or) \
V(S128Xor)
+#define SIMD_BINOP_ONE_TEMP_LIST(V) \
+ V(I64x2GeS) \
+ V(I64x2GtU) \
+ V(I64x2GeU)
+
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
+ V(I64x2Neg) \
V(I32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High) \
V(I32x4Neg) \
@@ -2635,6 +2685,9 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4ShrU) \
@@ -2646,11 +2699,13 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16ShrU)
#define SIMD_ANYTRUE_LIST(V) \
+ V(S1x2AnyTrue) \
V(S1x4AnyTrue) \
V(S1x8AnyTrue) \
V(S1x16AnyTrue)
#define SIMD_ALLTRUE_LIST(V) \
+ V(S1x2AllTrue) \
V(S1x4AllTrue) \
V(S1x8AllTrue) \
V(S1x16AllTrue)
@@ -2721,6 +2776,18 @@ SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
+#define VISIT_SIMD_BINOP_ONE_TEMP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+ arraysize(temps), temps); \
+ }
+SIMD_BINOP_ONE_TEMP_LIST(VISIT_SIMD_BINOP_ONE_TEMP)
+#undef VISIT_SIMD_BINOP_ONE_TEMP
+#undef SIMD_BINOP_ONE_TEMP_LIST
+
#define VISIT_SIMD_ANYTRUE(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
@@ -2751,12 +2818,33 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
+void InstructionSelector::VisitF64x2Abs(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64F64x2Abs, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitF64x2Neg(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64F64x2Neg, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
X64OperandGenerator g(this);
Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitI64x2Mul(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register(),
+ g.TempSimd128Register()};
+ Emit(kX64I64x2Mul, g.DefineSameAsFirst(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
X64OperandGenerator g(this);
Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
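
One detail in the VisitInt32Sub hunk above: the immediate is now negated with base::NegateWithWraparound(imm) instead of -imm, because negating INT32_MIN with plain signed arithmetic is undefined behaviour in C++. The identity such a helper relies on, sketched without claiming to match the exact implementation in src/base:

    #include <cstdint>

    // Two's-complement negation that is defined for every input, including
    // INT32_MIN, because the arithmetic happens on the unsigned type.
    static int32_t NegateWithWraparound(int32_t v) {
      return static_cast<int32_t>(0u - static_cast<uint32_t>(v));
    }
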
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 9c23cd460a..b44bec5fc8 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -79,22 +79,28 @@ ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset,
}
BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
- Zone* zone, bool do_liveness_analysis)
+ Zone* zone, BailoutId osr_bailout_id,
+ bool analyze_liveness)
: bytecode_array_(bytecode_array),
- do_liveness_analysis_(do_liveness_analysis),
zone_(zone),
+ osr_bailout_id_(osr_bailout_id),
+ analyze_liveness_(analyze_liveness),
loop_stack_(zone),
loop_end_index_queue_(zone),
resume_jump_targets_(zone),
end_to_header_(zone),
header_to_info_(zone),
osr_entry_point_(-1),
- liveness_map_(bytecode_array->length(), zone) {}
+ liveness_map_(bytecode_array->length(), zone) {
+ Analyze();
+}
namespace {
-void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
- const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateInLiveness(
+ Bytecode bytecode,
+ BytecodeLivenessState& in_liveness, // NOLINT(runtime/references)
+ const interpreter::BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
@@ -201,12 +207,14 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
}
}
-void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
- BytecodeLivenessState* next_bytecode_in_liveness,
- const interpreter::BytecodeArrayAccessor& accessor,
- const BytecodeLivenessMap& liveness_map) {
+void UpdateOutLiveness(
+ Bytecode bytecode,
+ BytecodeLivenessState& out_liveness, // NOLINT(runtime/references)
+ BytecodeLivenessState* next_bytecode_in_liveness,
+ const interpreter::BytecodeArrayAccessor& accessor,
+ Handle<BytecodeArray> bytecode_array,
+ const BytecodeLivenessMap& liveness_map) {
int current_offset = accessor.current_offset();
- const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();
// Special case Suspend and Resume to just pass through liveness.
if (bytecode == Bytecode::kSuspendGenerator ||
@@ -261,20 +269,24 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
}
}
-void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness,
+void UpdateLiveness(Bytecode bytecode,
+ BytecodeLiveness& liveness, // NOLINT(runtime/references)
BytecodeLivenessState** next_bytecode_in_liveness,
const interpreter::BytecodeArrayAccessor& accessor,
+ Handle<BytecodeArray> bytecode_array,
const BytecodeLivenessMap& liveness_map) {
UpdateOutLiveness(bytecode, *liveness.out, *next_bytecode_in_liveness,
- accessor, liveness_map);
+ accessor, bytecode_array, liveness_map);
liveness.in->CopyFrom(*liveness.out);
UpdateInLiveness(bytecode, *liveness.in, accessor);
*next_bytecode_in_liveness = liveness.in;
}
-void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
- const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateAssignments(
+ Bytecode bytecode,
+ BytecodeLoopAssignments& assignments, // NOLINT(runtime/references)
+ const interpreter::BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
@@ -307,15 +319,13 @@ void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
} // namespace
-void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
+void BytecodeAnalysis::Analyze() {
loop_stack_.push({-1, nullptr});
BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
-
- bool is_osr = !osr_bailout_id.IsNone();
- int osr_loop_end_offset = is_osr ? osr_bailout_id.ToInt() : -1;
-
int generator_switch_index = -1;
+ int osr_loop_end_offset = osr_bailout_id_.ToInt();
+ DCHECK_EQ(osr_loop_end_offset < 0, osr_bailout_id_.IsNone());
interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
@@ -337,14 +347,14 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
if (current_offset == osr_loop_end_offset) {
osr_entry_point_ = loop_header;
} else if (current_offset < osr_loop_end_offset) {
- // Check we've found the osr_entry_point if we've gone past the
+ // Assert that we've found the osr_entry_point if we've gone past the
// osr_loop_end_offset. Note, we are iterating the bytecode in reverse,
- // so the less than in the check is correct.
- DCHECK_NE(-1, osr_entry_point_);
+ // so the less-than in the above condition is correct.
+ DCHECK_LE(0, osr_entry_point_);
}
// Save the index so that we can do another pass later.
- if (do_liveness_analysis_) {
+ if (analyze_liveness_) {
loop_end_index_queue_.push_back(iterator.current_index());
}
} else if (loop_stack_.size() > 1) {
@@ -357,8 +367,8 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
// information we currently have.
UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);
- // Update suspend counts for this loop, though only if not OSR.
- if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+ // Update suspend counts for this loop.
+ if (bytecode == Bytecode::kSuspendGenerator) {
int suspend_id = iterator.GetUnsignedImmediateOperand(3);
int resume_offset = current_offset + iterator.current_bytecode_size();
current_loop_info->AddResumeTarget(
@@ -412,7 +422,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
}
}
}
- } else if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+ } else if (bytecode == Bytecode::kSuspendGenerator) {
// If we're not in a loop, we still need to look for suspends.
// TODO(leszeks): It would be nice to de-duplicate this with the in-loop
// case
@@ -422,11 +432,11 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
ResumeJumpTarget::Leaf(suspend_id, resume_offset));
}
- if (do_liveness_analysis_) {
+ if (analyze_liveness_) {
BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
current_offset, bytecode_array()->register_count(), zone());
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- liveness_map_);
+ bytecode_array(), liveness_map_);
}
}
@@ -435,7 +445,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
DCHECK(ResumeJumpTargetsAreValid());
- if (!do_liveness_analysis_) return;
+ if (!analyze_liveness_) return;
// At this point, every bytecode has a valid in and out liveness, except for
// propagating liveness across back edges (i.e. JumpLoop). Subsequent liveness
@@ -489,12 +499,13 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- liveness_map_);
+ bytecode_array(), liveness_map_);
}
// Now we are at the loop header. Since the in-liveness of the header
// can't change, we need only to update the out-liveness.
UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out,
- next_bytecode_in_liveness, iterator, liveness_map_);
+ next_bytecode_in_liveness, iterator, bytecode_array(),
+ liveness_map_);
}
// Process the generator switch statement separately, once the loops are done.
@@ -533,12 +544,12 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
DCHECK_NE(bytecode, Bytecode::kJumpLoop);
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- liveness_map_);
+ bytecode_array(), liveness_map_);
}
}
}
- DCHECK(do_liveness_analysis_);
+ DCHECK(analyze_liveness_);
if (FLAG_trace_environment_liveness) {
StdoutStream of;
PrintLivenessTo(of);
@@ -610,14 +621,14 @@ const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
int offset) const {
- if (!do_liveness_analysis_) return nullptr;
+ if (!analyze_liveness_) return nullptr;
return liveness_map_.GetInLiveness(offset);
}
const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor(
int offset) const {
- if (!do_liveness_analysis_) return nullptr;
+ if (!analyze_liveness_) return nullptr;
return liveness_map_.GetOutLiveness(offset);
}
@@ -662,9 +673,8 @@ bool BytecodeAnalysis::ResumeJumpTargetsAreValid() {
}
// If the iterator is invalid, we've reached the end without finding the
- // generator switch. Similarly, if we are OSR-ing, we're not resuming, so we
- // need no jump targets. So, ensure there are no jump targets and exit.
- if (!iterator.IsValid() || HasOsrEntryPoint()) {
+ // generator switch. So, ensure there are no jump targets and exit.
+ if (!iterator.IsValid()) {
// Check top-level.
if (!resume_jump_targets().empty()) {
PrintF(stderr,
@@ -758,14 +768,14 @@ bool BytecodeAnalysis::ResumeJumpTargetLeavesResolveSuspendIds(
valid = false;
} else {
// Make sure we're resuming to a Resume bytecode
- interpreter::BytecodeArrayAccessor assessor(bytecode_array(),
+ interpreter::BytecodeArrayAccessor accessor(bytecode_array(),
target.target_offset());
- if (assessor.current_bytecode() != Bytecode::kResumeGenerator) {
+ if (accessor.current_bytecode() != Bytecode::kResumeGenerator) {
PrintF(stderr,
"Expected resume target for id %d, offset %d, to be "
"ResumeGenerator, but found %s\n",
target.suspend_id(), target.target_offset(),
- Bytecodes::ToString(assessor.current_bytecode()));
+ Bytecodes::ToString(accessor.current_bytecode()));
valid = false;
}
@@ -820,7 +830,7 @@ bool BytecodeAnalysis::LivenessIsValid() {
previous_liveness.CopyFrom(*liveness.out);
UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
- iterator, liveness_map_);
+ iterator, bytecode_array(), liveness_map_);
// UpdateOutLiveness skips kJumpLoop, so we update it manually.
if (bytecode == Bytecode::kJumpLoop) {
int target_offset = iterator.GetJumpTargetOffset();
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index 53f86ca306..32c5168466 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -92,18 +92,14 @@ struct V8_EXPORT_PRIVATE LoopInfo {
ZoneVector<ResumeJumpTarget> resume_jump_targets_;
};
-class V8_EXPORT_PRIVATE BytecodeAnalysis {
+// Analyze the bytecodes to find the loop ranges, loop nesting, loop assignments
+// and liveness. NOTE: The broker/serializer relies on the fact that an
+// analysis for OSR (osr_bailout_id is not None) subsumes an analysis for
+// non-OSR (osr_bailout_id is None).
+class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
public:
BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
- bool do_liveness_analysis);
-
- // Analyze the bytecodes to find the loop ranges, loop nesting, loop
- // assignments and liveness, under the assumption that there is an OSR bailout
- // at {osr_bailout_id}.
- //
- // No other methods in this class return valid information until this has been
- // called.
- void Analyze(BailoutId osr_bailout_id);
+ BailoutId osr_bailout_id, bool analyze_liveness);
// Return true if the given offset is a loop header
bool IsLoopHeader(int offset) const;
@@ -118,23 +114,30 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis {
return resume_jump_targets_;
}
- // True if the current analysis has an OSR entry point.
- bool HasOsrEntryPoint() const { return osr_entry_point_ != -1; }
-
- int osr_entry_point() const { return osr_entry_point_; }
-
- // Gets the in-liveness for the bytecode at {offset}.
+ // Gets the in-/out-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetInLivenessFor(int offset) const;
-
- // Gets the out-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetOutLivenessFor(int offset) const;
+ // In the case of OSR, the analysis also computes the (bytecode offset of the)
+ // OSR entry point from the {osr_bailout_id} that was given to the
+ // constructor.
+ int osr_entry_point() const {
+ CHECK_LE(0, osr_entry_point_);
+ return osr_entry_point_;
+ }
+ // Return the osr_bailout_id (for verification purposes).
+ BailoutId osr_bailout_id() const { return osr_bailout_id_; }
+
+ // Return whether liveness analysis was performed (for verification purposes).
+ bool liveness_analyzed() const { return analyze_liveness_; }
+
private:
struct LoopStackEntry {
int header_offset;
LoopInfo* loop_info;
};
+ void Analyze();
void PushLoop(int loop_header, int loop_end);
#if DEBUG
@@ -153,17 +156,15 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis {
std::ostream& PrintLivenessTo(std::ostream& os) const;
Handle<BytecodeArray> const bytecode_array_;
- bool const do_liveness_analysis_;
Zone* const zone_;
-
+ BailoutId const osr_bailout_id_;
+ bool const analyze_liveness_;
ZoneStack<LoopStackEntry> loop_stack_;
ZoneVector<int> loop_end_index_queue_;
ZoneVector<ResumeJumpTarget> resume_jump_targets_;
-
ZoneMap<int, int> end_to_header_;
ZoneMap<int, LoopInfo> header_to_info_;
int osr_entry_point_;
-
BytecodeLivenessMap liveness_map_;
DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis);
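With this header change the analysis runs in the constructor, so a BytecodeAnalysis object is fully populated as soon as it is built and can be queried right away; there is no longer a separate Analyze(osr_bailout_id) step. A minimal sketch of the reworked interface (not code from the tree; the zone, bytecode array handle, and offset are assumed to be supplied by the caller, e.g. the broker or the graph builder):

    // Sketch only: illustrates the constructor-runs-analysis interface
    // introduced by this patch.
    #include "src/compiler/bytecode-analysis.h"

    void QueryAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
                       int offset) {
      // Non-OSR compile: pass BailoutId::None(); liveness analysis enabled.
      compiler::BytecodeAnalysis analysis(bytecode_array, zone,
                                          BailoutId::None(),
                                          /* analyze_liveness */ true);
      if (analysis.IsLoopHeader(offset)) {
        const compiler::LoopInfo& info = analysis.GetLoopInfoFor(offset);
        // ... use the loop assignments / resume targets recorded in info ...
      }
      // Returns nullptr when liveness analysis was disabled.
      const compiler::BytecodeLivenessState* in_liveness =
          analysis.GetInLivenessFor(offset);
      // ... feed in_liveness into the graph builder's environment ...
      // Note: osr_entry_point() may only be called for OSR compiles; it now
      // CHECKs that an entry point was found.
    }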
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 0ab8f85670..7c71446320 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -6,9 +6,11 @@
#include "src/ast/ast.h"
#include "src/codegen/source-position-table.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
@@ -32,14 +34,15 @@ namespace compiler {
class BytecodeGraphBuilder {
public:
BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone,
- Handle<BytecodeArray> bytecode_array,
- Handle<SharedFunctionInfo> shared,
- Handle<FeedbackVector> feedback_vector,
- BailoutId osr_offset, JSGraph* jsgraph,
+ BytecodeArrayRef bytecode_array,
+ SharedFunctionInfoRef shared,
+ FeedbackVectorRef feedback_vector, BailoutId osr_offset,
+ JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- Handle<Context> native_context, int inlining_id,
- BytecodeGraphBuilderFlags flags);
+ NativeContextRef native_context, int inlining_id,
+ BytecodeGraphBuilderFlags flags,
+ TickCounter* tick_counter);
// Creates a graph by visiting bytecodes.
void CreateGraph();
@@ -318,12 +321,8 @@ class BytecodeGraphBuilder {
return jsgraph_->simplified();
}
Zone* local_zone() const { return local_zone_; }
- const Handle<BytecodeArray>& bytecode_array() const {
- return bytecode_array_;
- }
- const Handle<FeedbackVector>& feedback_vector() const {
- return feedback_vector_;
- }
+ const BytecodeArrayRef bytecode_array() const { return bytecode_array_; }
+ FeedbackVectorRef feedback_vector() const { return feedback_vector_; }
const JSTypeHintLowering& type_hint_lowering() const {
return type_hint_lowering_;
}
@@ -332,7 +331,7 @@ class BytecodeGraphBuilder {
}
SourcePositionTableIterator& source_position_iterator() {
- return source_position_iterator_;
+ return *source_position_iterator_.get();
}
interpreter::BytecodeArrayIterator& bytecode_iterator() {
@@ -343,8 +342,6 @@ class BytecodeGraphBuilder {
return bytecode_analysis_;
}
- void RunBytecodeAnalysis() { bytecode_analysis_.Analyze(osr_offset_); }
-
int currently_peeled_loop_offset() const {
return currently_peeled_loop_offset_;
}
@@ -368,9 +365,9 @@ class BytecodeGraphBuilder {
needs_eager_checkpoint_ = value;
}
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+ SharedFunctionInfoRef shared_info() const { return shared_info_; }
- Handle<Context> native_context() const { return native_context_; }
+ NativeContextRef native_context() const { return native_context_; }
JSHeapBroker* broker() const { return broker_; }
@@ -382,15 +379,15 @@ class BytecodeGraphBuilder {
Zone* const local_zone_;
JSGraph* const jsgraph_;
CallFrequency const invocation_frequency_;
- Handle<BytecodeArray> const bytecode_array_;
- Handle<FeedbackVector> const feedback_vector_;
+ BytecodeArrayRef const bytecode_array_;
+ FeedbackVectorRef feedback_vector_;
JSTypeHintLowering const type_hint_lowering_;
const FrameStateFunctionInfo* const frame_state_function_info_;
- SourcePositionTableIterator source_position_iterator_;
+ std::unique_ptr<SourcePositionTableIterator> source_position_iterator_;
interpreter::BytecodeArrayIterator bytecode_iterator_;
- BytecodeAnalysis bytecode_analysis_;
+ BytecodeAnalysis const& bytecode_analysis_;
Environment* environment_;
- BailoutId const osr_offset_;
+ bool const osr_;
int currently_peeled_loop_offset_;
bool skip_next_stack_check_;
@@ -434,10 +431,12 @@ class BytecodeGraphBuilder {
SourcePosition const start_position_;
- Handle<SharedFunctionInfo> const shared_info_;
+ SharedFunctionInfoRef const shared_info_;
// The native context for which we optimize.
- Handle<Context> const native_context_;
+ NativeContextRef const native_context_;
+
+ TickCounter* const tick_counter_;
static int const kBinaryOperationHintIndex = 1;
static int const kCountOperationHintIndex = 0;
@@ -938,13 +937,12 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
}
BytecodeGraphBuilder::BytecodeGraphBuilder(
- JSHeapBroker* broker, Zone* local_zone,
- Handle<BytecodeArray> bytecode_array,
- Handle<SharedFunctionInfo> shared_info,
- Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
- JSGraph* jsgraph, CallFrequency const& invocation_frequency,
- SourcePositionTable* source_positions, Handle<Context> native_context,
- int inlining_id, BytecodeGraphBuilderFlags flags)
+ JSHeapBroker* broker, Zone* local_zone, BytecodeArrayRef bytecode_array,
+ SharedFunctionInfoRef shared_info, FeedbackVectorRef feedback_vector,
+ BailoutId osr_offset, JSGraph* jsgraph,
+ CallFrequency const& invocation_frequency,
+ SourcePositionTable* source_positions, NativeContextRef native_context,
+ int inlining_id, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
: broker_(broker),
local_zone_(local_zone),
jsgraph_(jsgraph),
@@ -952,22 +950,22 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
bytecode_array_(bytecode_array),
feedback_vector_(feedback_vector),
type_hint_lowering_(
- jsgraph, feedback_vector,
+ jsgraph, feedback_vector.object(),
(flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized)
? JSTypeHintLowering::kBailoutOnUninitialized
: JSTypeHintLowering::kNoFlags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
- bytecode_array->parameter_count(), bytecode_array->register_count(),
- shared_info)),
- source_position_iterator_(
- handle(bytecode_array->SourcePositionTableIfCollected(), isolate())),
- bytecode_iterator_(bytecode_array),
- bytecode_analysis_(
- bytecode_array, local_zone,
- flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness),
+ bytecode_array.parameter_count(), bytecode_array.register_count(),
+ shared_info.object())),
+ bytecode_iterator_(
+ base::make_unique<OffHeapBytecodeArray>(bytecode_array)),
+ bytecode_analysis_(broker_->GetBytecodeAnalysis(
+ bytecode_array.object(), osr_offset,
+ flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness,
+ !FLAG_concurrent_inlining)),
environment_(nullptr),
- osr_offset_(osr_offset),
+ osr_(!osr_offset.IsNone()),
currently_peeled_loop_offset_(-1),
skip_next_stack_check_(flags &
BytecodeGraphBuilderFlag::kSkipFirstStackCheck),
@@ -981,9 +979,23 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
exit_controls_(local_zone),
state_values_cache_(jsgraph),
source_positions_(source_positions),
- start_position_(shared_info->StartPosition(), inlining_id),
+ start_position_(shared_info.StartPosition(), inlining_id),
shared_info_(shared_info),
- native_context_(native_context) {}
+ native_context_(native_context),
+ tick_counter_(tick_counter) {
+ if (FLAG_concurrent_inlining) {
+ // With concurrent inlining on, the source position address doesn't change
+ // because it's been copied from the heap.
+ source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
+ Vector<const byte>(bytecode_array.source_positions_address(),
+ bytecode_array.source_positions_size()));
+ } else {
+ // Otherwise, we need to access the table through a handle.
+ source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
+ handle(bytecode_array.object()->SourcePositionTableIfCollected(),
+ isolate()));
+ }
+}
Node* BytecodeGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
@@ -997,33 +1009,30 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() {
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
Node* result = NewNode(javascript()->LoadContext(0, index, true));
- NodeProperties::ReplaceContextInput(
- result, jsgraph()->HeapConstant(native_context()));
+ NodeProperties::ReplaceContextInput(result,
+ jsgraph()->Constant(native_context()));
return result;
}
VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
FeedbackSlot slot = FeedbackVector::ToSlot(slot_id);
- FeedbackNexus nexus(feedback_vector(), slot);
- return VectorSlotPair(feedback_vector(), slot, nexus.ic_state());
+ FeedbackNexus nexus(feedback_vector().object(), slot);
+ return VectorSlotPair(feedback_vector().object(), slot, nexus.ic_state());
}
void BytecodeGraphBuilder::CreateGraph() {
- BytecodeArrayRef bytecode_array_ref(broker(), bytecode_array());
-
SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
// Set up the basic structure of the graph. Outputs for {Start} are the formal
// parameters (including the receiver) plus new target, number of arguments,
// context and closure.
- int actual_parameter_count = bytecode_array_ref.parameter_count() + 4;
+ int actual_parameter_count = bytecode_array().parameter_count() + 4;
graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
- Environment env(
- this, bytecode_array_ref.register_count(),
- bytecode_array_ref.parameter_count(),
- bytecode_array_ref.incoming_new_target_or_generator_register(),
- graph()->start());
+ Environment env(this, bytecode_array().register_count(),
+ bytecode_array().parameter_count(),
+ bytecode_array().incoming_new_target_or_generator_register(),
+ graph()->start());
set_environment(&env);
VisitBytecodes();
@@ -1112,19 +1121,17 @@ class BytecodeGraphBuilder::OsrIteratorState {
void ProcessOsrPrelude() {
ZoneVector<int> outer_loop_offsets(graph_builder_->local_zone());
- BytecodeAnalysis const& bytecode_analysis =
- graph_builder_->bytecode_analysis();
- int osr_offset = bytecode_analysis.osr_entry_point();
+ int osr_entry = graph_builder_->bytecode_analysis().osr_entry_point();
// We find here the outermost loop which contains the OSR loop.
- int outermost_loop_offset = osr_offset;
- while ((outermost_loop_offset =
- bytecode_analysis.GetLoopInfoFor(outermost_loop_offset)
- .parent_offset()) != -1) {
+ int outermost_loop_offset = osr_entry;
+ while ((outermost_loop_offset = graph_builder_->bytecode_analysis()
+ .GetLoopInfoFor(outermost_loop_offset)
+ .parent_offset()) != -1) {
outer_loop_offsets.push_back(outermost_loop_offset);
}
outermost_loop_offset =
- outer_loop_offsets.empty() ? osr_offset : outer_loop_offsets.back();
+ outer_loop_offsets.empty() ? osr_entry : outer_loop_offsets.back();
graph_builder_->AdvanceIteratorsTo(outermost_loop_offset);
// We save some iterator states at the offsets of the loop headers of the
@@ -1142,14 +1149,16 @@ class BytecodeGraphBuilder::OsrIteratorState {
}
// Finish by advancing to the OSR entry
- graph_builder_->AdvanceIteratorsTo(osr_offset);
+ graph_builder_->AdvanceIteratorsTo(osr_entry);
// Enters all remaining exception handlers which end before the OSR loop
// so that on the next call of VisitSingleBytecode they will get popped from
// the exception handler stack.
- graph_builder_->ExitThenEnterExceptionHandlers(osr_offset);
+ graph_builder_->ExitThenEnterExceptionHandlers(osr_entry);
graph_builder_->set_currently_peeled_loop_offset(
- bytecode_analysis.GetLoopInfoFor(osr_offset).parent_offset());
+ graph_builder_->bytecode_analysis()
+ .GetLoopInfoFor(osr_entry)
+ .parent_offset());
}
void RestoreState(int target_offset, int new_parent_offset) {
@@ -1198,8 +1207,8 @@ void BytecodeGraphBuilder::RemoveMergeEnvironmentsBeforeOffset(
void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
OsrIteratorState iterator_states(this);
iterator_states.ProcessOsrPrelude();
- int osr_offset = bytecode_analysis().osr_entry_point();
- DCHECK_EQ(bytecode_iterator().current_offset(), osr_offset);
+ int osr_entry = bytecode_analysis().osr_entry_point();
+ DCHECK_EQ(bytecode_iterator().current_offset(), osr_entry);
environment()->FillWithOsrValues();
@@ -1217,7 +1226,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
// parent loop entirely, and so on.
int current_parent_offset =
- bytecode_analysis().GetLoopInfoFor(osr_offset).parent_offset();
+ bytecode_analysis().GetLoopInfoFor(osr_entry).parent_offset();
while (current_parent_offset != -1) {
const LoopInfo& current_parent_loop =
bytecode_analysis().GetLoopInfoFor(current_parent_offset);
@@ -1261,6 +1270,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
}
void BytecodeGraphBuilder::VisitSingleBytecode() {
+ tick_counter_->DoTick();
int current_offset = bytecode_iterator().current_offset();
UpdateSourcePosition(current_offset);
ExitThenEnterExceptionHandlers(current_offset);
@@ -1289,14 +1299,12 @@ void BytecodeGraphBuilder::VisitSingleBytecode() {
}
void BytecodeGraphBuilder::VisitBytecodes() {
- RunBytecodeAnalysis();
-
if (!bytecode_analysis().resume_jump_targets().empty()) {
environment()->BindGeneratorState(
jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
}
- if (bytecode_analysis().HasOsrEntryPoint()) {
+ if (osr_) {
// We peel the OSR loop and any outer loop containing it except that we
// leave the nodes corresponding to the whole outermost loop (including
// the last copies of the loops it contains) to be generated by the normal
@@ -1333,7 +1341,7 @@ void BytecodeGraphBuilder::VisitLdaSmi() {
void BytecodeGraphBuilder::VisitLdaConstant() {
Node* node = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
environment()->BindAccumulator(node);
}
@@ -1383,15 +1391,16 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name,
uint32_t feedback_slot_index,
TypeofMode typeof_mode) {
VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
- DCHECK(IsLoadGlobalICKind(feedback_vector()->GetKind(feedback.slot())));
+ DCHECK(
+ IsLoadGlobalICKind(feedback_vector().object()->GetKind(feedback.slot())));
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
return NewNode(op);
}
void BytecodeGraphBuilder::VisitLdaGlobal() {
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
@@ -1400,8 +1409,8 @@ void BytecodeGraphBuilder::VisitLdaGlobal() {
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
@@ -1410,8 +1419,8 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
void BytecodeGraphBuilder::VisitStaGlobal() {
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
@@ -1537,7 +1546,7 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
PrepareEagerCheckpoint();
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
@@ -1622,7 +1631,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
set_environment(slow_environment);
{
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1657,9 +1666,8 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Fast path, do a global load.
{
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1675,7 +1683,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
set_environment(slow_environment);
{
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1705,7 +1713,7 @@ void BytecodeGraphBuilder::VisitStaLookupSlot() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int bytecode_flags = bytecode_iterator().GetFlagOperand(1);
LanguageMode language_mode = static_cast<LanguageMode>(
interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
@@ -1729,8 +1737,8 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name, feedback);
@@ -1753,8 +1761,8 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
const Operator* op = javascript()->LoadNamed(name, VectorSlotPair());
Node* node = NewNode(op, object);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1788,8 +1796,8 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
@@ -1828,8 +1836,8 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
LanguageMode language_mode =
static_cast<LanguageMode>(bytecode_iterator().GetFlagOperand(2));
const Operator* op =
@@ -1902,10 +1910,8 @@ void BytecodeGraphBuilder::VisitPopContext() {
}
void BytecodeGraphBuilder::VisitCreateClosure() {
- Handle<SharedFunctionInfo> shared_info(
- SharedFunctionInfo::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
AllocationType allocation =
interpreter::CreateClosureFlags::PretenuredBit::decode(
bytecode_iterator().GetFlagOperand(2))
@@ -1913,7 +1919,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
: AllocationType::kYoung;
const Operator* op = javascript()->CreateClosure(
shared_info,
- feedback_vector()->GetClosureFeedbackCell(
+ feedback_vector().object()->GetClosureFeedbackCell(
bytecode_iterator().GetIndexOperand(1)),
handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy),
isolate()),
@@ -1923,9 +1929,8 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
}
void BytecodeGraphBuilder::VisitCreateBlockContext() {
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op = javascript()->CreateBlockContext(scope_info);
Node* context = NewNode(op);
@@ -1933,9 +1938,8 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() {
}
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op =
javascript()->CreateFunctionContext(scope_info, slots, FUNCTION_SCOPE);
@@ -1944,9 +1948,8 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
}
void BytecodeGraphBuilder::VisitCreateEvalContext() {
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op =
javascript()->CreateFunctionContext(scope_info, slots, EVAL_SCOPE);
@@ -1957,9 +1960,8 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() {
void BytecodeGraphBuilder::VisitCreateCatchContext() {
interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
Node* exception = environment()->LookupRegister(reg);
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
const Operator* op = javascript()->CreateCatchContext(scope_info);
Node* context = NewNode(op, exception);
@@ -1969,9 +1971,8 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
void BytecodeGraphBuilder::VisitCreateWithContext() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
const Operator* op = javascript()->CreateWithContext(scope_info);
Node* context = NewNode(op, object);
@@ -1997,9 +1998,8 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() {
}
void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
- Handle<String> constant_pattern(
- String::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<String> constant_pattern = Handle<String>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2009,10 +2009,9 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
}
void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
- Handle<ArrayBoilerplateDescription> array_boilerplate_description(
- ArrayBoilerplateDescription::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ArrayBoilerplateDescription> array_boilerplate_description =
+ Handle<ArrayBoilerplateDescription>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2046,10 +2045,9 @@ void BytecodeGraphBuilder::VisitCreateArrayFromIterable() {
}
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
- Handle<ObjectBoilerplateDescription> constant_properties(
- ObjectBoilerplateDescription::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ObjectBoilerplateDescription> constant_properties =
+ Handle<ObjectBoilerplateDescription>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2082,29 +2080,13 @@ void BytecodeGraphBuilder::VisitCloneObject() {
}
void BytecodeGraphBuilder::VisitGetTemplateObject() {
- Handle<TemplateObjectDescription> description(
- TemplateObjectDescription::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
- FeedbackNexus nexus(feedback_vector(), slot);
-
- Handle<JSArray> cached_value;
- if (nexus.GetFeedback() == MaybeObject::FromSmi(Smi::zero())) {
- // It's not observable when the template object is created, so we
- // can just create it eagerly during graph building and bake in
- // the JSArray constant here.
- cached_value = TemplateObjectDescription::GetTemplateObject(
- isolate(), native_context(), description, shared_info(), slot.ToInt());
- nexus.vector().Set(slot, *cached_value);
- } else {
- cached_value =
- handle(JSArray::cast(nexus.GetFeedback()->GetHeapObjectAssumeStrong()),
- isolate());
- }
-
- Node* template_object = jsgraph()->HeapConstant(cached_value);
- environment()->BindAccumulator(template_object);
+ ObjectRef description(
+ broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ JSArrayRef template_object =
+ shared_info().GetTemplateObject(description, feedback_vector(), slot);
+ environment()->BindAccumulator(jsgraph()->Constant(template_object));
}
Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters(
@@ -2587,7 +2569,7 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
jsgraph()->TheHoleConstant());
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
BuildHoleCheckAndThrow(check_for_hole,
Runtime::kThrowAccessedUninitializedVariable, name);
}
@@ -2658,7 +2640,7 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
int operand_index) {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
- FeedbackNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector().object(), slot);
return nexus.GetBinaryOperationFeedback();
}
@@ -2666,14 +2648,14 @@ BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
// feedback.
CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
- FeedbackNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector().object(), slot);
return nexus.GetCompareOperationFeedback();
}
// Helper function to create for-in mode from the recorded type feedback.
ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
- FeedbackNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector().object(), slot);
switch (nexus.GetForInFeedback()) {
case ForInHint::kNone:
case ForInHint::kEnumCacheKeysAndIndices:
@@ -2688,7 +2670,8 @@ ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
if (invocation_frequency_.IsUnknown()) return CallFrequency();
- FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
+ FeedbackNexus nexus(feedback_vector().object(),
+ FeedbackVector::ToSlot(slot_id));
float feedback_frequency = nexus.ComputeCallFrequency();
if (feedback_frequency == 0.0f) {
// This is to prevent multiplying zero and infinity.
@@ -2699,7 +2682,8 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
}
SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
- FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
+ FeedbackNexus nexus(feedback_vector().object(),
+ FeedbackVector::ToSlot(slot_id));
return nexus.GetSpeculationMode();
}
@@ -3301,8 +3285,7 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
CHECK_EQ(0, first_reg.index());
int register_count =
static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
- int parameter_count_without_receiver =
- bytecode_array()->parameter_count() - 1;
+ int parameter_count_without_receiver = bytecode_array().parameter_count() - 1;
Node* suspend_id = jsgraph()->SmiConstant(
bytecode_iterator().GetUnsignedImmediateOperand(3));
@@ -3442,8 +3425,7 @@ void BytecodeGraphBuilder::VisitResumeGenerator() {
const BytecodeLivenessState* liveness = bytecode_analysis().GetOutLivenessFor(
bytecode_iterator().current_offset());
- int parameter_count_without_receiver =
- bytecode_array()->parameter_count() - 1;
+ int parameter_count_without_receiver = bytecode_array().parameter_count() - 1;
// Mapping between registers and array indices must match that used in
// InterpreterAssembler::ExportParametersAndRegisterFile.
@@ -3836,7 +3818,10 @@ Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
}
void BytecodeGraphBuilder::ExitThenEnterExceptionHandlers(int current_offset) {
- HandlerTable table(*bytecode_array());
+ DisallowHeapAllocation no_allocation;
+ HandlerTable table(bytecode_array().handler_table_address(),
+ bytecode_array().handler_table_size(),
+ HandlerTable::kRangeBasedEncoding);
// Potentially exit exception handlers.
while (!exception_handlers_.empty()) {
@@ -3890,7 +3875,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (has_context) {
*current_input++ = OperatorProperties::NeedsExactContext(op)
? environment()->Context()
- : jsgraph()->HeapConstant(native_context());
+ : jsgraph()->Constant(native_context());
}
if (has_frame_state) {
// The frame state will be inserted later. Here we misuse the {Dead} node
@@ -4037,12 +4022,19 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- Handle<Context> native_context, int inlining_id,
- BytecodeGraphBuilderFlags flags) {
- BytecodeGraphBuilder builder(broker, local_zone, bytecode_array, shared,
- feedback_vector, osr_offset, jsgraph,
- invocation_frequency, source_positions,
- native_context, inlining_id, flags);
+ Handle<NativeContext> native_context,
+ int inlining_id, BytecodeGraphBuilderFlags flags,
+ TickCounter* tick_counter) {
+ BytecodeArrayRef bytecode_array_ref(broker, bytecode_array);
+ DCHECK(bytecode_array_ref.IsSerializedForCompilation());
+ FeedbackVectorRef feedback_vector_ref(broker, feedback_vector);
+ SharedFunctionInfoRef shared_ref(broker, shared);
+ DCHECK(shared_ref.IsSerializedForCompilation(feedback_vector_ref));
+ NativeContextRef native_context_ref(broker, native_context);
+ BytecodeGraphBuilder builder(
+ broker, local_zone, bytecode_array_ref, shared_ref, feedback_vector_ref,
+ osr_offset, jsgraph, invocation_frequency, source_positions,
+ native_context_ref, inlining_id, flags, tick_counter);
builder.CreateGraph();
}
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index b9504a6086..682569778f 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -11,6 +11,9 @@
#include "src/handles/handles.h"
namespace v8 {
+
+class TickCounter;
+
namespace internal {
class BytecodeArray;
@@ -25,6 +28,9 @@ class SourcePositionTable;
enum class BytecodeGraphBuilderFlag : uint8_t {
kSkipFirstStackCheck = 1 << 0,
+ // TODO(neis): Remove liveness flag here when concurrent inlining is always
+ // on, because then the serializer will be the only place where we perform
+ // bytecode analysis.
kAnalyzeEnvironmentLiveness = 1 << 1,
kBailoutOnUninitialized = 1 << 2,
};
@@ -39,8 +45,9 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- Handle<Context> native_context, int inlining_id,
- BytecodeGraphBuilderFlags flags);
+ Handle<NativeContext> native_context,
+ int inlining_id, BytecodeGraphBuilderFlags flags,
+ TickCounter* tick_counter);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index d8a01d6308..af0ba98ffd 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -226,8 +226,12 @@ void CodeAssembler::GenerateCheckMaybeObjectIsObject(Node* node,
IntPtrConstant(kHeapObjectTagMask)),
IntPtrConstant(kWeakHeapObjectTag)),
&ok);
- Node* message_node = StringConstant(location);
- DebugAbort(message_node);
+ EmbeddedVector<char, 1024> message;
+ SNPrintF(message, "no Object: %s", location);
+ Node* message_node = StringConstant(message.begin());
+ // This somewhat misuses the AbortCSAAssert runtime function. This will print
+ // "abort: CSA_ASSERT failed: <message>", which is good enough.
+ AbortCSAAssert(message_node);
Unreachable();
Bind(&ok);
}
@@ -409,8 +413,8 @@ void CodeAssembler::ReturnRaw(Node* value) {
return raw_assembler()->Return(value);
}
-void CodeAssembler::DebugAbort(Node* message) {
- raw_assembler()->DebugAbort(message);
+void CodeAssembler::AbortCSAAssert(Node* message) {
+ raw_assembler()->AbortCSAAssert(message);
}
void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
@@ -441,16 +445,16 @@ void CodeAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) {
}
#endif // DEBUG
-Node* CodeAssembler::LoadFramePointer() {
- return raw_assembler()->LoadFramePointer();
+TNode<RawPtrT> CodeAssembler::LoadFramePointer() {
+ return UncheckedCast<RawPtrT>(raw_assembler()->LoadFramePointer());
}
-Node* CodeAssembler::LoadParentFramePointer() {
- return raw_assembler()->LoadParentFramePointer();
+TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
+ return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
}
-Node* CodeAssembler::LoadStackPointer() {
- return raw_assembler()->LoadStackPointer();
+TNode<RawPtrT> CodeAssembler::LoadStackPointer() {
+ return UncheckedCast<RawPtrT>(raw_assembler()->LoadStackPointer());
}
TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(
@@ -1140,14 +1144,6 @@ Node* CodeAssembler::Retain(Node* value) {
return raw_assembler()->Retain(value);
}
-Node* CodeAssembler::ChangeTaggedToCompressed(Node* tagged) {
- return raw_assembler()->ChangeTaggedToCompressed(tagged);
-}
-
-Node* CodeAssembler::ChangeCompressedToTagged(Node* compressed) {
- return raw_assembler()->ChangeCompressedToTagged(compressed);
-}
-
Node* CodeAssembler::Projection(int index, Node* value) {
DCHECK_LT(index, value->op()->ValueOutputCount());
return raw_assembler()->Projection(index, value);
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 0f7ae64082..cc432214aa 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -73,6 +73,9 @@ class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
class WasmDebugInfo;
class Zone;
+#define MAKE_FORWARD_DECLARATION(V, NAME, Name, name) class Name;
+TORQUE_STRUCT_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED)
+#undef MAKE_FORWARD_DECLARATION
template <typename T>
class Signature;
@@ -107,13 +110,13 @@ struct Uint32T : Word32T {
struct Int16T : Int32T {
static constexpr MachineType kMachineType = MachineType::Int16();
};
-struct Uint16T : Uint32T {
+struct Uint16T : Uint32T, Int32T {
static constexpr MachineType kMachineType = MachineType::Uint16();
};
struct Int8T : Int16T {
static constexpr MachineType kMachineType = MachineType::Int8();
};
-struct Uint8T : Uint16T {
+struct Uint8T : Uint16T, Int16T {
static constexpr MachineType kMachineType = MachineType::Uint8();
};
@@ -147,6 +150,12 @@ struct Float64T : UntaggedT {
static constexpr MachineType kMachineType = MachineType::Float64();
};
+#ifdef V8_COMPRESS_POINTERS
+using TaggedT = Int32T;
+#else
+using TaggedT = IntPtrT;
+#endif
+
// Result of a comparison operation.
struct BoolT : Word32T {};
@@ -329,6 +338,7 @@ class WasmExceptionObject;
class WasmExceptionTag;
class WasmExportedFunctionData;
class WasmGlobalObject;
+class WasmIndirectFunctionTable;
class WasmJSFunctionData;
class WasmMemoryObject;
class WasmModuleObject;
@@ -413,6 +423,10 @@ struct types_have_common_values {
static const bool value = is_subtype<T, U>::value || is_subtype<U, T>::value;
};
template <class U>
+struct types_have_common_values<BoolT, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
struct types_have_common_values<Uint32T, U> {
static const bool value = types_have_common_values<Word32T, U>::value;
};
@@ -611,14 +625,15 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64Sqrt, Float64T, Float64T) \
V(Float64Tan, Float64T, Float64T) \
V(Float64Tanh, Float64T, Float64T) \
- V(Float64ExtractLowWord32, Word32T, Float64T) \
- V(Float64ExtractHighWord32, Word32T, Float64T) \
+ V(Float64ExtractLowWord32, Uint32T, Float64T) \
+ V(Float64ExtractHighWord32, Uint32T, Float64T) \
V(BitcastTaggedToWord, IntPtrT, Object) \
+ V(BitcastTaggedSignedToWord, IntPtrT, Smi) \
V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \
V(BitcastWordToTagged, Object, WordT) \
V(BitcastWordToTaggedSigned, Smi, WordT) \
V(TruncateFloat64ToFloat32, Float32T, Float64T) \
- V(TruncateFloat64ToWord32, Word32T, Float64T) \
+ V(TruncateFloat64ToWord32, Uint32T, Float64T) \
V(TruncateInt64ToInt32, Int32T, Int64T) \
V(ChangeFloat32ToFloat64, Float64T, Float32T) \
V(ChangeFloat64ToUint32, Uint32T, Float64T) \
@@ -628,7 +643,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(ChangeUint32ToFloat64, Float64T, Word32T) \
V(ChangeUint32ToUint64, Uint64T, Word32T) \
V(BitcastInt32ToFloat32, Float32T, Word32T) \
- V(BitcastFloat32ToInt32, Word32T, Float32T) \
+ V(BitcastFloat32ToInt32, Uint32T, Float32T) \
V(RoundFloat64ToInt32, Int32T, Float64T) \
V(RoundInt32ToFloat32, Int32T, Float32T) \
V(Float64SilenceNaN, Float64T, Float64T) \
@@ -840,10 +855,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// TODO(jkummerow): The style guide wants pointers for output parameters.
// https://google.github.io/styleguide/cppguide.html#Output_Parameters
- bool ToInt32Constant(Node* node, int32_t& out_value);
- bool ToInt64Constant(Node* node, int64_t& out_value);
+ bool ToInt32Constant(Node* node,
+ int32_t& out_value); // NOLINT(runtime/references)
+ bool ToInt64Constant(Node* node,
+ int64_t& out_value); // NOLINT(runtime/references)
bool ToSmiConstant(Node* node, Smi* out_value);
- bool ToIntPtrConstant(Node* node, intptr_t& out_value);
+ bool ToIntPtrConstant(Node* node,
+ intptr_t& out_value); // NOLINT(runtime/references)
bool IsUndefinedConstant(TNode<Object> node);
bool IsNullConstant(TNode<Object> node);
@@ -872,7 +890,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void ReturnRaw(Node* value);
- void DebugAbort(Node* message);
+ void AbortCSAAssert(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const char* msg) {
@@ -938,11 +956,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Label** case_labels, size_t case_count);
// Access to the frame pointer
- Node* LoadFramePointer();
- Node* LoadParentFramePointer();
+ TNode<RawPtrT> LoadFramePointer();
+ TNode<RawPtrT> LoadParentFramePointer();
// Access to the stack pointer
- Node* LoadStackPointer();
+ TNode<RawPtrT> LoadStackPointer();
// Poison |value| on speculative paths.
TNode<Object> TaggedPoisonOnSpeculation(SloppyTNode<Object> value);
@@ -1047,20 +1065,60 @@ class V8_EXPORT_PRIVATE CodeAssembler {
CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
#undef DECLARE_CODE_ASSEMBLER_BINARY_OP
- TNode<IntPtrT> WordShr(TNode<IntPtrT> left, TNode<IntegralT> right) {
- return UncheckedCast<IntPtrT>(
+ TNode<UintPtrT> WordShr(TNode<UintPtrT> left, TNode<IntegralT> right) {
+ return Unsigned(
WordShr(static_cast<Node*>(left), static_cast<Node*>(right)));
}
TNode<IntPtrT> WordSar(TNode<IntPtrT> left, TNode<IntegralT> right) {
- return UncheckedCast<IntPtrT>(
- WordSar(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(WordSar(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<IntPtrT> WordShl(TNode<IntPtrT> left, TNode<IntegralT> right) {
+ return Signed(WordShl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<UintPtrT> WordShl(TNode<UintPtrT> left, TNode<IntegralT> right) {
+ return Unsigned(
+ WordShl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
+ TNode<Int32T> Word32Shl(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Word32Shl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32Shl(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32Shl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32Shr(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32Shr(static_cast<Node*>(left), static_cast<Node*>(right)));
}
TNode<IntPtrT> WordAnd(TNode<IntPtrT> left, TNode<IntPtrT> right) {
- return UncheckedCast<IntPtrT>(
+ return Signed(WordAnd(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<UintPtrT> WordAnd(TNode<UintPtrT> left, TNode<UintPtrT> right) {
+ return Unsigned(
WordAnd(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<Int32T> Word32And(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32And(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
+ TNode<Int32T> Word32Or(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32Or(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
template <class Left, class Right,
class = typename std::enable_if<
std::is_base_of<Object, Left>::value &&
@@ -1106,6 +1164,15 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<BoolT> Word64NotEqual(SloppyTNode<Word64T> left,
SloppyTNode<Word64T> right);
+ TNode<BoolT> Word32Or(TNode<BoolT> left, TNode<BoolT> right) {
+ return UncheckedCast<BoolT>(
+ Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<BoolT> Word32And(TNode<BoolT> left, TNode<BoolT> right) {
+ return UncheckedCast<BoolT>(
+ Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
TNode<Int32T> Int32Add(TNode<Int32T> left, TNode<Int32T> right) {
return Signed(
Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
@@ -1116,6 +1183,16 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<Int32T> Int32Sub(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Int32Sub(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
+ TNode<Int32T> Int32Mul(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Int32Mul(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
TNode<WordT> IntPtrAdd(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
TNode<IntPtrT> IntPtrDiv(TNode<IntPtrT> left, TNode<IntPtrT> right);
TNode<WordT> IntPtrSub(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
@@ -1195,6 +1272,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
#undef DECLARE_CODE_ASSEMBLER_UNARY_OP
+ template <class Dummy = void>
+ TNode<IntPtrT> BitcastTaggedToWord(TNode<Smi> node) {
+ static_assert(sizeof(Dummy) < 0,
+ "Should use BitcastTaggedSignedToWord instead.");
+ }
+
// Changes a double to an intptr_t for pointer arithmetic outside of Smi range.
// Assumes that the double can be exactly represented as an int.
TNode<UintPtrT> ChangeFloat64ToUintPtr(SloppyTNode<Float64T> value);
@@ -1217,10 +1300,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Projections
Node* Projection(int index, Node* value);
- // Pointer compression and decompression.
- Node* ChangeTaggedToCompressed(Node* tagged);
- Node* ChangeCompressedToTagged(Node* compressed);
-
template <int index, class T1, class T2>
TNode<typename std::tuple_element<index, std::tuple<T1, T2>>::type>
Projection(TNode<PairT<T1, T2>> value) {
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index fa727748f6..5dd765527f 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -337,9 +337,9 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
// End
// Now the effect input to the {Return} node can be either an {EffectPhi}
- // hanging off the same {Merge}, or the {Merge} node is only connected to
- // the {Return} and the {Phi}, in which case we know that the effect input
- // must somehow dominate all merged branches.
+ // hanging off the same {Merge}, or the effect chain doesn't depend on the
+ // {Phi} or the {Merge}, in which case we know that the effect input must
+ // somehow dominate all merged branches.
Node::Inputs control_inputs = control->inputs();
Node::Inputs value_inputs = value->inputs();
@@ -347,7 +347,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1);
DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
DCHECK_NE(0, graph()->end()->InputCount());
- if (control->OwnedBy(node, value)) {
+ if (control->OwnedBy(node, value) && value->OwnedBy(node)) {
for (int i = 0; i < control_inputs.count(); ++i) {
// Create a new {Return} and connect it to {end}. We don't need to mark
// {end} as revisit, because we mark {node} as {Dead} below, which was
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 45e558f609..0ef6402264 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -1216,8 +1216,18 @@ const Operator* CommonOperatorBuilder::HeapConstant(
value); // parameter
}
+const Operator* CommonOperatorBuilder::CompressedHeapConstant(
+ const Handle<HeapObject>& value) {
+ return new (zone()) Operator1<Handle<HeapObject>>( // --
+ IrOpcode::kCompressedHeapConstant, Operator::kPure, // opcode
+ "CompressedHeapConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
+}
+
Handle<HeapObject> HeapConstantOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kHeapConstant, op->opcode());
+ DCHECK(IrOpcode::kHeapConstant == op->opcode() ||
+ IrOpcode::kCompressedHeapConstant == op->opcode());
return OpParameter<Handle<HeapObject>>(op);
}
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 43a689b5c2..9f634e72ec 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -499,6 +499,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* NumberConstant(volatile double);
const Operator* PointerConstant(intptr_t);
const Operator* HeapConstant(const Handle<HeapObject>&);
+ const Operator* CompressedHeapConstant(const Handle<HeapObject>&);
const Operator* ObjectId(uint32_t);
const Operator* RelocatableInt32Constant(int32_t value,
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index f0bb797b68..673f4a341b 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -4,6 +4,7 @@
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/compilation-dependency.h"
#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/objects-inl.h"
@@ -17,18 +18,7 @@ CompilationDependencies::CompilationDependencies(JSHeapBroker* broker,
Zone* zone)
: zone_(zone), broker_(broker), dependencies_(zone) {}
-class CompilationDependencies::Dependency : public ZoneObject {
- public:
- virtual bool IsValid() const = 0;
- virtual void PrepareInstall() const {}
- virtual void Install(const MaybeObjectHandle& code) const = 0;
-
-#ifdef DEBUG
- virtual bool IsPretenureModeDependency() const { return false; }
-#endif
-};
-
-class InitialMapDependency final : public CompilationDependencies::Dependency {
+class InitialMapDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the initial map.
@@ -56,8 +46,7 @@ class InitialMapDependency final : public CompilationDependencies::Dependency {
MapRef initial_map_;
};
-class PrototypePropertyDependency final
- : public CompilationDependencies::Dependency {
+class PrototypePropertyDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the prototype.
@@ -96,7 +85,7 @@ class PrototypePropertyDependency final
ObjectRef prototype_;
};
-class StableMapDependency final : public CompilationDependencies::Dependency {
+class StableMapDependency final : public CompilationDependency {
public:
explicit StableMapDependency(const MapRef& map) : map_(map) {
DCHECK(map_.is_stable());
@@ -114,7 +103,7 @@ class StableMapDependency final : public CompilationDependencies::Dependency {
MapRef map_;
};
-class TransitionDependency final : public CompilationDependencies::Dependency {
+class TransitionDependency final : public CompilationDependency {
public:
explicit TransitionDependency(const MapRef& map) : map_(map) {
DCHECK(!map_.is_deprecated());
@@ -132,8 +121,7 @@ class TransitionDependency final : public CompilationDependencies::Dependency {
MapRef map_;
};
-class PretenureModeDependency final
- : public CompilationDependencies::Dependency {
+class PretenureModeDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the mode.
@@ -163,8 +151,7 @@ class PretenureModeDependency final
AllocationType allocation_;
};
-class FieldRepresentationDependency final
- : public CompilationDependencies::Dependency {
+class FieldRepresentationDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the representation.
@@ -197,7 +184,7 @@ class FieldRepresentationDependency final
Representation representation_;
};
-class FieldTypeDependency final : public CompilationDependencies::Dependency {
+class FieldTypeDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the type.
@@ -227,8 +214,7 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
ObjectRef type_;
};
-class FieldConstnessDependency final
- : public CompilationDependencies::Dependency {
+class FieldConstnessDependency final : public CompilationDependency {
public:
FieldConstnessDependency(const MapRef& owner, int descriptor)
: owner_(owner), descriptor_(descriptor) {
@@ -255,8 +241,7 @@ class FieldConstnessDependency final
int descriptor_;
};
-class GlobalPropertyDependency final
- : public CompilationDependencies::Dependency {
+class GlobalPropertyDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the type and the read_only flag.
@@ -294,7 +279,7 @@ class GlobalPropertyDependency final
bool read_only_;
};
-class ProtectorDependency final : public CompilationDependencies::Dependency {
+class ProtectorDependency final : public CompilationDependency {
public:
explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {
DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid);
@@ -315,8 +300,7 @@ class ProtectorDependency final : public CompilationDependencies::Dependency {
PropertyCellRef cell_;
};
-class ElementsKindDependency final
- : public CompilationDependencies::Dependency {
+class ElementsKindDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the elements kind.
@@ -349,7 +333,7 @@ class ElementsKindDependency final
};
class InitialMapInstanceSizePredictionDependency final
- : public CompilationDependencies::Dependency {
+ : public CompilationDependency {
public:
InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function,
int instance_size)
@@ -380,7 +364,8 @@ class InitialMapInstanceSizePredictionDependency final
int instance_size_;
};
-void CompilationDependencies::RecordDependency(Dependency const* dependency) {
+void CompilationDependencies::RecordDependency(
+ CompilationDependency const* dependency) {
if (dependency != nullptr) dependencies_.push_front(dependency);
}
@@ -565,6 +550,11 @@ namespace {
// This function expects to never see a JSProxy.
void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
base::Optional<JSObjectRef> last_prototype) {
+ // TODO(neis): Remove heap access (SerializePrototype call).
+ AllowCodeDependencyChange dependency_change_;
+ AllowHandleAllocation handle_allocation_;
+ AllowHandleDereference handle_dereference_;
+ AllowHeapAllocation heap_allocation_;
while (true) {
map.SerializePrototype();
HeapObjectRef proto = map.prototype();
@@ -635,7 +625,7 @@ CompilationDependencies::DependOnInitialMapInstanceSizePrediction(
return SlackTrackingPrediction(initial_map, instance_size);
}
-CompilationDependencies::Dependency const*
+CompilationDependency const*
CompilationDependencies::TransitionDependencyOffTheRecord(
const MapRef& target_map) const {
if (target_map.CanBeDeprecated()) {
@@ -646,7 +636,7 @@ CompilationDependencies::TransitionDependencyOffTheRecord(
}
}
-CompilationDependencies::Dependency const*
+CompilationDependency const*
CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
const MapRef& map, int descriptor) const {
MapRef owner = map.FindFieldOwner(descriptor);
@@ -657,7 +647,7 @@ CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
details.representation());
}
-CompilationDependencies::Dependency const*
+CompilationDependency const*
CompilationDependencies::FieldTypeDependencyOffTheRecord(const MapRef& map,
int descriptor) const {
MapRef owner = map.FindFieldOwner(descriptor);
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index 37a2bc3a28..cb6cea0685 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -25,6 +25,8 @@ class SlackTrackingPrediction {
int inobject_property_count_;
};
+class CompilationDependency;
+
// Collects and installs dependencies of the code that is being generated.
class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
public:
@@ -113,14 +115,13 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// DependOnTransition(map);
// is equivalent to:
// RecordDependency(TransitionDependencyOffTheRecord(map));
- class Dependency;
- void RecordDependency(Dependency const* dependency);
- Dependency const* TransitionDependencyOffTheRecord(
+ void RecordDependency(CompilationDependency const* dependency);
+ CompilationDependency const* TransitionDependencyOffTheRecord(
const MapRef& target_map) const;
- Dependency const* FieldRepresentationDependencyOffTheRecord(
+ CompilationDependency const* FieldRepresentationDependencyOffTheRecord(
+ const MapRef& map, int descriptor) const;
+ CompilationDependency const* FieldTypeDependencyOffTheRecord(
const MapRef& map, int descriptor) const;
- Dependency const* FieldTypeDependencyOffTheRecord(const MapRef& map,
- int descriptor) const;
// Exposed only for testing purposes.
bool AreValid() const;
@@ -128,7 +129,7 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
private:
Zone* const zone_;
JSHeapBroker* const broker_;
- ZoneForwardList<Dependency const*> dependencies_;
+ ZoneForwardList<CompilationDependency const*> dependencies_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h
new file mode 100644
index 0000000000..e5726a0ddb
--- /dev/null
+++ b/deps/v8/src/compiler/compilation-dependency.h
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMPILATION_DEPENDENCY_H_
+#define V8_COMPILER_COMPILATION_DEPENDENCY_H_
+
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class MaybeObjectHandle;
+
+namespace compiler {
+
+class CompilationDependency : public ZoneObject {
+ public:
+ virtual bool IsValid() const = 0;
+ virtual void PrepareInstall() const {}
+ virtual void Install(const MaybeObjectHandle& code) const = 0;
+
+#ifdef DEBUG
+ virtual bool IsPretenureModeDependency() const { return false; }
+#endif
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_COMPILATION_DEPENDENCY_H_
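Hoisting the previously nested CompilationDependencies::Dependency class into the stand-alone CompilationDependency above lets other headers refer to it through a plain forward declaration, which is impossible for a nested class. A minimal sketch of the pattern, with hypothetical names:

    #include <forward_list>

    // Lives in its own header so clients can write "class Dependency;".
    class Dependency {
     public:
      virtual ~Dependency() = default;
      virtual bool IsValid() const = 0;
    };

    // A client that only stores pointers needs the forward declaration, not
    // the full definition.
    class Recorder {
     public:
      void Record(const Dependency* dep) {
        if (dep != nullptr) deps_.push_front(dep);
      }

     private:
      std::forward_list<const Dependency*> deps_;
    };

    int main() {
      Recorder r;
      r.Record(nullptr);
      return 0;
    }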
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
index 7177a6069d..600db1d160 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.cc
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -4,6 +4,7 @@
#include "src/compiler/control-flow-optimizer.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-matchers.h"
@@ -16,18 +17,20 @@ namespace compiler {
ControlFlowOptimizer::ControlFlowOptimizer(Graph* graph,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
+ TickCounter* tick_counter,
Zone* zone)
: graph_(graph),
common_(common),
machine_(machine),
queue_(zone),
queued_(graph, 2),
- zone_(zone) {}
-
+ zone_(zone),
+ tick_counter_(tick_counter) {}
void ControlFlowOptimizer::Optimize() {
Enqueue(graph()->start());
while (!queue_.empty()) {
+ tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop();
if (node->IsDead()) continue;
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
index 0a688a7c39..07fc9e6fc2 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.h
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -11,6 +11,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -22,7 +25,8 @@ class Node;
class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
public:
ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine, Zone* zone);
+ MachineOperatorBuilder* machine,
+ TickCounter* tick_counter, Zone* zone);
void Optimize();
@@ -45,6 +49,7 @@ class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
ZoneQueue<Node*> queue_;
NodeMarker<bool> queued_;
Zone* const zone_;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(ControlFlowOptimizer);
};
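The TickCounter threaded through the optimizer above is bumped once per work-queue iteration so long-running graph passes make observable progress. A sketch of the same wiring; the TickCounter here is a simplified stand-in, not V8's class:

    #include <cstdint>
    #include <queue>

    class TickCounter {
     public:
      void DoTick() { ++ticks_; }
      uint64_t ticks() const { return ticks_; }

     private:
      uint64_t ticks_ = 0;
    };

    // Mirrors ControlFlowOptimizer::Optimize: one tick per node taken off the
    // work queue.
    void Optimize(std::queue<int> queue, TickCounter* tick_counter) {
      while (!queue.empty()) {
        tick_counter->DoTick();
        queue.pop();  // ...visit the node here...
      }
    }

    int main() {
      TickCounter tc;
      std::queue<int> work;
      for (int node = 0; node < 5; ++node) work.push(node);
      Optimize(work, &tc);
      return static_cast<int>(tc.ticks()) - 5;  // 0: one tick per node
    }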
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
new file mode 100644
index 0000000000..620d98019f
--- /dev/null
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -0,0 +1,336 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/csa-load-elimination.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction CsaLoadElimination::Reduce(Node* node) {
+ if (FLAG_trace_turbo_load_elimination) {
+ if (node->op()->EffectInputCount() > 0) {
+ PrintF(" visit #%d:%s", node->id(), node->op()->mnemonic());
+ if (node->op()->ValueInputCount() > 0) {
+ PrintF("(");
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ if (i > 0) PrintF(", ");
+ Node* const value = NodeProperties::GetValueInput(node, i);
+ PrintF("#%d:%s", value->id(), value->op()->mnemonic());
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (AbstractState const* const state = node_states_.Get(effect)) {
+ PrintF(" state[%i]: #%d:%s\n", i, effect->id(),
+ effect->op()->mnemonic());
+ state->Print();
+ } else {
+ PrintF(" no state[%i]: #%d:%s\n", i, effect->id(),
+ effect->op()->mnemonic());
+ }
+ }
+ }
+ }
+ switch (node->opcode()) {
+ case IrOpcode::kLoadFromObject:
+ return ReduceLoadFromObject(node, ObjectAccessOf(node->op()));
+ case IrOpcode::kStoreToObject:
+ return ReduceStoreToObject(node, ObjectAccessOf(node->op()));
+ case IrOpcode::kDebugBreak:
+ case IrOpcode::kAbortCSAAssert:
+ // Avoid changing optimizations in the presence of debug instructions.
+ return PropagateInputState(node);
+ case IrOpcode::kCall:
+ return ReduceCall(node);
+ case IrOpcode::kEffectPhi:
+ return ReduceEffectPhi(node);
+ case IrOpcode::kDead:
+ break;
+ case IrOpcode::kStart:
+ return ReduceStart(node);
+ default:
+ return ReduceOtherNode(node);
+ }
+ return NoChange();
+}
+
+namespace CsaLoadEliminationHelpers {
+
+bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
+ if (r1 == r2) return true;
+ return IsAnyCompressedTagged(r1) && IsAnyCompressedTagged(r2);
+}
+
+bool ObjectMayAlias(Node* a, Node* b) {
+ if (a != b) {
+ if (b->opcode() == IrOpcode::kAllocate) {
+ std::swap(a, b);
+ }
+ if (a->opcode() == IrOpcode::kAllocate) {
+ switch (b->opcode()) {
+ case IrOpcode::kAllocate:
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kParameter:
+ return false;
+ default:
+ break;
+ }
+ }
+ }
+ return true;
+}
+
+bool OffsetMayAlias(Node* offset1, MachineRepresentation repr1, Node* offset2,
+ MachineRepresentation repr2) {
+ IntPtrMatcher matcher1(offset1);
+ IntPtrMatcher matcher2(offset2);
+ // If either of the offsets is variable, accesses may alias
+ if (!matcher1.HasValue() || !matcher2.HasValue()) {
+ return true;
+ }
+ // Otherwise, we return whether accesses overlap
+ intptr_t start1 = matcher1.Value();
+ intptr_t end1 = start1 + ElementSizeInBytes(repr1);
+ intptr_t start2 = matcher2.Value();
+ intptr_t end2 = start2 + ElementSizeInBytes(repr2);
+ return !(end1 <= start2 || end2 <= start1);
+}
+
+} // namespace CsaLoadEliminationHelpers
+
+namespace Helpers = CsaLoadEliminationHelpers;
+
+void CsaLoadElimination::AbstractState::Merge(AbstractState const* that,
+ Zone* zone) {
+ FieldInfo empty_info;
+ for (std::pair<Field, FieldInfo> entry : field_infos_) {
+ if (that->field_infos_.Get(entry.first) != entry.second) {
+ field_infos_.Set(entry.first, empty_info);
+ }
+ }
+}
+
+CsaLoadElimination::AbstractState const*
+CsaLoadElimination::AbstractState::KillField(Node* kill_object,
+ Node* kill_offset,
+ MachineRepresentation kill_repr,
+ Zone* zone) const {
+ FieldInfo empty_info;
+ AbstractState* that = new (zone) AbstractState(*this);
+ for (std::pair<Field, FieldInfo> entry : that->field_infos_) {
+ Field field = entry.first;
+ MachineRepresentation field_repr = entry.second.representation;
+ if (Helpers::OffsetMayAlias(kill_offset, kill_repr, field.second,
+ field_repr) &&
+ Helpers::ObjectMayAlias(kill_object, field.first)) {
+ that->field_infos_.Set(field, empty_info);
+ }
+ }
+ return that;
+}
+
+CsaLoadElimination::AbstractState const*
+CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset,
+ CsaLoadElimination::FieldInfo info,
+ Zone* zone) const {
+ AbstractState* that = new (zone) AbstractState(*this);
+ that->field_infos_.Set({object, offset}, info);
+ return that;
+}
+
+CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup(
+ Node* object, Node* offset) const {
+ if (object->IsDead()) {
+ return {};
+ }
+ return field_infos_.Get({object, offset});
+}
+
+void CsaLoadElimination::AbstractState::Print() const {
+ for (std::pair<Field, FieldInfo> entry : field_infos_) {
+ Field field = entry.first;
+ Node* object = field.first;
+ Node* offset = field.second;
+ FieldInfo info = entry.second;
+ PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(),
+ object->op()->mnemonic(), info.value->id(),
+ info.value->op()->mnemonic(),
+ MachineReprToString(info.representation));
+ }
+}
+
+Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
+ ObjectAccess const& access) {
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* offset = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ MachineRepresentation representation = access.machine_type.representation();
+ FieldInfo lookup_result = state->Lookup(object, offset);
+ if (!lookup_result.IsEmpty()) {
+ // Make sure we don't reuse values that were recorded with a different
+ // representation or resurrect dead {replacement} nodes.
+ Node* replacement = lookup_result.value;
+ if (Helpers::IsCompatible(representation, lookup_result.representation) &&
+ !replacement->IsDead()) {
+ ReplaceWithValue(node, replacement, effect);
+ return Replace(replacement);
+ }
+ }
+ FieldInfo info(node, representation);
+ state = state->AddField(object, offset, info, zone());
+
+ return UpdateState(node, state);
+}
+
+Reduction CsaLoadElimination::ReduceStoreToObject(Node* node,
+ ObjectAccess const& access) {
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* offset = NodeProperties::GetValueInput(node, 1);
+ Node* value = NodeProperties::GetValueInput(node, 2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ FieldInfo info(value, access.machine_type.representation());
+ state = state->KillField(object, offset, info.representation, zone());
+ state = state->AddField(object, offset, info, zone());
+
+ return UpdateState(node, state);
+}
+
+Reduction CsaLoadElimination::ReduceEffectPhi(Node* node) {
+ Node* const effect0 = NodeProperties::GetEffectInput(node, 0);
+ Node* const control = NodeProperties::GetControlInput(node);
+ AbstractState const* state0 = node_states_.Get(effect0);
+ if (state0 == nullptr) return NoChange();
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Here we rely on having only reducible loops:
+ // The loop entry edge always dominates the header, so we can just take
+ // the state from the first input, and compute the loop state based on it.
+ AbstractState const* state = ComputeLoopState(node, state0);
+ return UpdateState(node, state);
+ }
+ DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+
+ // Shortcut for the case when we do not know anything about some input.
+ int const input_count = node->op()->EffectInputCount();
+ for (int i = 1; i < input_count; ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (node_states_.Get(effect) == nullptr) return NoChange();
+ }
+
+ // Make a copy of the first input's state and merge with the state
+ // from other inputs.
+ AbstractState* state = new (zone()) AbstractState(*state0);
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = NodeProperties::GetEffectInput(node, i);
+ state->Merge(node_states_.Get(input), zone());
+ }
+ return UpdateState(node, state);
+}
+
+Reduction CsaLoadElimination::ReduceStart(Node* node) {
+ return UpdateState(node, empty_state());
+}
+
+Reduction CsaLoadElimination::ReduceCall(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ ExternalReferenceMatcher m(value);
+ if (m.Is(ExternalReference::check_object_type())) {
+ return PropagateInputState(node);
+ }
+ return ReduceOtherNode(node);
+}
+
+Reduction CsaLoadElimination::ReduceOtherNode(Node* node) {
+ if (node->op()->EffectInputCount() == 1) {
+ if (node->op()->EffectOutputCount() == 1) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate
+ // just yet because we will have to recompute anyway once we compute
+ // the predecessor.
+ if (state == nullptr) return NoChange();
+ // Check if this {node} has some uncontrolled side effects.
+ if (!node->op()->HasProperty(Operator::kNoWrite)) {
+ state = empty_state();
+ }
+ return UpdateState(node, state);
+ } else {
+ return NoChange();
+ }
+ }
+ DCHECK_EQ(0, node->op()->EffectInputCount());
+ DCHECK_EQ(0, node->op()->EffectOutputCount());
+ return NoChange();
+}
+
+Reduction CsaLoadElimination::UpdateState(Node* node,
+ AbstractState const* state) {
+ AbstractState const* original = node_states_.Get(node);
+ // Only signal that the {node} has Changed, if the information about {state}
+ // has changed wrt. the {original}.
+ if (state != original) {
+ if (original == nullptr || !state->Equals(original)) {
+ node_states_.Set(node, state);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+Reduction CsaLoadElimination::PropagateInputState(Node* node) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ return UpdateState(node, state);
+}
+
+CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState(
+ Node* node, AbstractState const* state) const {
+ DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
+ Node* const control = NodeProperties::GetControlInput(node);
+ ZoneQueue<Node*> queue(zone());
+ ZoneSet<Node*> visited(zone());
+ visited.insert(node);
+ for (int i = 1; i < control->InputCount(); ++i) {
+ queue.push(node->InputAt(i));
+ }
+ while (!queue.empty()) {
+ Node* const current = queue.front();
+ queue.pop();
+ if (visited.insert(current).second) {
+ if (!current->op()->HasProperty(Operator::kNoWrite)) {
+ return empty_state();
+ }
+ for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
+ queue.push(NodeProperties::GetEffectInput(current, i));
+ }
+ }
+ }
+ return state;
+}
+
+CommonOperatorBuilder* CsaLoadElimination::common() const {
+ return jsgraph()->common();
+}
+
+Graph* CsaLoadElimination::graph() const { return jsgraph()->graph(); }
+
+Isolate* CsaLoadElimination::isolate() const { return jsgraph()->isolate(); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
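OffsetMayAlias above reduces constant-offset aliasing to a byte-range overlap test. The same check in isolation, with a few spot checks; the helper name is made up for the sketch:

    #include <cassert>
    #include <cstdint>

    // Two constant-offset accesses alias iff their byte ranges overlap,
    // mirroring OffsetMayAlias. Sizes are in bytes.
    bool RangesOverlap(intptr_t start1, intptr_t size1,
                       intptr_t start2, intptr_t size2) {
      intptr_t end1 = start1 + size1;
      intptr_t end2 = start2 + size2;
      return !(end1 <= start2 || end2 <= start1);
    }

    int main() {
      assert(!RangesOverlap(0, 4, 4, 4));  // adjacent 32-bit fields do not alias
      assert(RangesOverlap(0, 8, 4, 4));   // a 64-bit access overlaps the field at +4
      assert(RangesOverlap(4, 4, 4, 4));   // same offset, same size: aliases
      return 0;
    }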
diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h
new file mode 100644
index 0000000000..9460858d04
--- /dev/null
+++ b/deps/v8/src/compiler/csa-load-elimination.h
@@ -0,0 +1,118 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CSA_LOAD_ELIMINATION_H_
+#define V8_COMPILER_CSA_LOAD_ELIMINATION_H_
+
+#include "src/base/compiler-specific.h"
+#include "src/codegen/machine-type.h"
+#include "src/common/globals.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/persistent-map.h"
+#include "src/handles/maybe-handles.h"
+#include "src/zone/zone-handle-set.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+struct ObjectAccess;
+class Graph;
+class JSGraph;
+
+class V8_EXPORT_PRIVATE CsaLoadElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+ CsaLoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor),
+ empty_state_(zone),
+ node_states_(jsgraph->graph()->NodeCount(), zone),
+ jsgraph_(jsgraph),
+ zone_(zone) {}
+ ~CsaLoadElimination() final = default;
+
+ const char* reducer_name() const override { return "CsaLoadElimination"; }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ struct FieldInfo {
+ FieldInfo() = default;
+ FieldInfo(Node* value, MachineRepresentation representation)
+ : value(value), representation(representation) {}
+
+ bool operator==(const FieldInfo& other) const {
+ return value == other.value && representation == other.representation;
+ }
+
+ bool operator!=(const FieldInfo& other) const { return !(*this == other); }
+
+ bool IsEmpty() const { return value == nullptr; }
+
+ Node* value = nullptr;
+ MachineRepresentation representation = MachineRepresentation::kNone;
+ };
+
+ class AbstractState final : public ZoneObject {
+ public:
+ explicit AbstractState(Zone* zone) : field_infos_(zone) {}
+
+ bool Equals(AbstractState const* that) const {
+ return field_infos_ == that->field_infos_;
+ }
+ void Merge(AbstractState const* that, Zone* zone);
+
+ AbstractState const* KillField(Node* object, Node* offset,
+ MachineRepresentation repr,
+ Zone* zone) const;
+ AbstractState const* AddField(Node* object, Node* offset, FieldInfo info,
+ Zone* zone) const;
+ FieldInfo Lookup(Node* object, Node* offset) const;
+
+ void Print() const;
+
+ private:
+ using Field = std::pair<Node*, Node*>;
+ using FieldInfos = PersistentMap<Field, FieldInfo>;
+ FieldInfos field_infos_;
+ };
+
+ Reduction ReduceLoadFromObject(Node* node, ObjectAccess const& access);
+ Reduction ReduceStoreToObject(Node* node, ObjectAccess const& access);
+ Reduction ReduceEffectPhi(Node* node);
+ Reduction ReduceStart(Node* node);
+ Reduction ReduceCall(Node* node);
+ Reduction ReduceOtherNode(Node* node);
+
+ Reduction UpdateState(Node* node, AbstractState const* state);
+ Reduction PropagateInputState(Node* node);
+
+ AbstractState const* ComputeLoopState(Node* node,
+ AbstractState const* state) const;
+
+ CommonOperatorBuilder* common() const;
+ Isolate* isolate() const;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Zone* zone() const { return zone_; }
+ AbstractState const* empty_state() const { return &empty_state_; }
+
+ AbstractState const empty_state_;
+ NodeAuxData<AbstractState const*> node_states_;
+ JSGraph* const jsgraph_;
+ Zone* zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(CsaLoadElimination);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CSA_LOAD_ELIMINATION_H_
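The AbstractState above keys its persistent map by (object, offset) pairs: a load hits when an identical pair is present with a compatible representation, and a store kills every pair that may alias before recording the new value. A much-simplified model of those operations using std::map; the names and the blanket object-level kill are simplifications, not the real pass:

    #include <map>
    #include <utility>

    using NodeId = int;
    using Field = std::pair<NodeId, NodeId>;  // (object, offset)

    struct State {
      std::map<Field, NodeId> known;  // last value stored or loaded per field

      void AddField(NodeId object, NodeId offset, NodeId value) {
        known[{object, offset}] = value;
      }
      // Conservative kill: forget everything about {object} (the real pass
      // only clears entries whose offset ranges may alias).
      void KillObject(NodeId object) {
        for (auto it = known.begin(); it != known.end();)
          it = (it->first.first == object) ? known.erase(it) : ++it;
      }
      bool Lookup(NodeId object, NodeId offset, NodeId* out) const {
        auto it = known.find({object, offset});
        if (it == known.end()) return false;
        *out = it->second;
        return true;
      }
    };

    int main() {
      State s;
      s.AddField(/*object=*/1, /*offset=*/8, /*value=*/42);
      NodeId v;
      bool hit = s.Lookup(1, 8, &v);  // hit == true, v == 42
      s.KillObject(1);                // a potentially aliasing store clears it
      hit = s.Lookup(1, 8, &v);       // hit == false
      (void)hit;
      return 0;
    }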
diff --git a/deps/v8/src/compiler/decompression-elimination.cc b/deps/v8/src/compiler/decompression-elimination.cc
index e69e61fac5..537744652b 100644
--- a/deps/v8/src/compiler/decompression-elimination.cc
+++ b/deps/v8/src/compiler/decompression-elimination.cc
@@ -21,10 +21,8 @@ bool DecompressionElimination::IsReducibleConstantOpcode(
IrOpcode::Value opcode) {
switch (opcode) {
case IrOpcode::kInt64Constant:
- return true;
- // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
- // exists, since it breaks with verify CSA on.
case IrOpcode::kHeapConstant:
+ return true;
default:
return false;
}
@@ -55,13 +53,8 @@ Node* DecompressionElimination::GetCompressedConstant(Node* constant) {
static_cast<int32_t>(OpParameter<int64_t>(constant->op()))));
break;
case IrOpcode::kHeapConstant:
- // TODO(v8:8977): The HeapConstant remains as 64 bits. This does not
- // affect the comparison and it will still work correctly. However, we are
- // introducing a 64 bit value in the stream where a 32 bit one will
- // suffice. Currently there is no "CompressedHeapConstant", and
- // introducing a new opcode and handling it correctly throught the
- // pipeline seems that it will involve quite a bit of work.
- return constant;
+ return graph()->NewNode(
+ common()->CompressedHeapConstant(HeapConstantOf(constant->op())));
default:
UNREACHABLE();
}
@@ -84,6 +77,21 @@ Reduction DecompressionElimination::ReduceCompress(Node* node) {
}
}
+Reduction DecompressionElimination::ReduceDecompress(Node* node) {
+ DCHECK(IrOpcode::IsDecompressOpcode(node->opcode()));
+
+ DCHECK_EQ(node->InputCount(), 1);
+ Node* input_node = node->InputAt(0);
+ IrOpcode::Value input_opcode = input_node->opcode();
+ if (IrOpcode::IsCompressOpcode(input_opcode)) {
+ DCHECK(IsValidDecompress(input_opcode, node->opcode()));
+ DCHECK_EQ(input_node->InputCount(), 1);
+ return Replace(input_node->InputAt(0));
+ } else {
+ return NoChange();
+ }
+}
+
Reduction DecompressionElimination::ReducePhi(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kPhi);
@@ -138,7 +146,10 @@ Reduction DecompressionElimination::ReducePhi(Node* node) {
// Add a decompress after the Phi. To do this, we need to replace the Phi with
// "Phi <- Decompress".
- return Replace(graph()->NewNode(op, node));
+ Node* decompress = graph()->NewNode(op, node);
+ ReplaceWithValue(node, decompress);
+ decompress->ReplaceInput(0, node);
+ return Changed(node);
}
Reduction DecompressionElimination::ReduceTypedStateValues(Node* node) {
@@ -201,6 +212,10 @@ Reduction DecompressionElimination::Reduce(Node* node) {
case IrOpcode::kChangeTaggedSignedToCompressedSigned:
case IrOpcode::kChangeTaggedPointerToCompressedPointer:
return ReduceCompress(node);
+ case IrOpcode::kChangeCompressedToTagged:
+ case IrOpcode::kChangeCompressedSignedToTaggedSigned:
+ case IrOpcode::kChangeCompressedPointerToTaggedPointer:
+ return ReduceDecompress(node);
case IrOpcode::kPhi:
return ReducePhi(node);
case IrOpcode::kTypedStateValues:
diff --git a/deps/v8/src/compiler/decompression-elimination.h b/deps/v8/src/compiler/decompression-elimination.h
index c850f064a9..85a6c98aa0 100644
--- a/deps/v8/src/compiler/decompression-elimination.h
+++ b/deps/v8/src/compiler/decompression-elimination.h
@@ -38,7 +38,7 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
// elimination.
bool IsReducibleConstantOpcode(IrOpcode::Value opcode);
- // Get the new 32 bit node constant given the 64 bit one
+ // Get the new 32 bit node constant given the 64 bit one.
Node* GetCompressedConstant(Node* constant);
// Removes direct Decompressions & Compressions, going from
@@ -48,6 +48,9 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
// Can be used for Any, Signed, and Pointer compressions.
Reduction ReduceCompress(Node* node);
+ // Removes direct Compressions & Decompressions, analogously to ReduceCompress.
+ Reduction ReduceDecompress(Node* node);
+
// Replaces Phi's input decompressions with their input node, if and only if
// all of the Phi's inputs are Decompress nodes.
Reduction ReducePhi(Node* node);
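ReduceDecompress mirrors ReduceCompress: a decompression whose input is the matching compression is replaced by that compression's input. A toy graph illustrating the cancellation, not the real node machinery:

    #include <cassert>

    enum class Op { kValue, kCompress, kDecompress };

    struct Node {
      Op op;
      Node* input = nullptr;  // a single input is enough for this sketch
    };

    // If the decompression's input is a compression, skip both and use the
    // compression's input directly.
    Node* ReduceDecompress(Node* node) {
      if (node->op == Op::kDecompress && node->input->op == Op::kCompress) {
        return node->input->input;
      }
      return node;  // no change
    }

    int main() {
      Node value{Op::kValue};
      Node compress{Op::kCompress, &value};
      Node decompress{Op::kDecompress, &compress};
      assert(ReduceDecompress(&decompress) == &value);
      return 0;
    }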
diff --git a/deps/v8/src/compiler/diamond.h b/deps/v8/src/compiler/diamond.h
index cc6ca954f3..cac1b1726b 100644
--- a/deps/v8/src/compiler/diamond.h
+++ b/deps/v8/src/compiler/diamond.h
@@ -33,13 +33,13 @@ struct Diamond {
}
// Place {this} after {that} in control flow order.
- void Chain(Diamond& that) { branch->ReplaceInput(1, that.merge); }
+ void Chain(Diamond const& that) { branch->ReplaceInput(1, that.merge); }
// Place {this} after {that} in control flow order.
void Chain(Node* that) { branch->ReplaceInput(1, that); }
// Nest {this} into either the if_true or if_false branch of {that}.
- void Nest(Diamond& that, bool if_true) {
+ void Nest(Diamond const& that, bool if_true) {
if (if_true) {
branch->ReplaceInput(1, that.if_true);
that.merge->ReplaceInput(0, merge);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index ced078a178..788638fe68 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -17,6 +17,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
+#include "src/execution/frames.h"
#include "src/heap/factory-inl.h"
#include "src/objects/heap-number.h"
#include "src/objects/oddball.h"
@@ -51,6 +52,7 @@ class EffectControlLinearizer {
bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
Node** control);
Node* LowerChangeBitToTagged(Node* node);
+ Node* LowerChangeInt31ToCompressedSigned(Node* node);
Node* LowerChangeInt31ToTaggedSigned(Node* node);
Node* LowerChangeInt32ToTagged(Node* node);
Node* LowerChangeInt64ToTagged(Node* node);
@@ -58,6 +60,7 @@ class EffectControlLinearizer {
Node* LowerChangeUint64ToTagged(Node* node);
Node* LowerChangeFloat64ToTagged(Node* node);
Node* LowerChangeFloat64ToTaggedPointer(Node* node);
+ Node* LowerChangeCompressedSignedToInt32(Node* node);
Node* LowerChangeTaggedSignedToInt32(Node* node);
Node* LowerChangeTaggedSignedToInt64(Node* node);
Node* LowerChangeTaggedToBit(Node* node);
@@ -75,6 +78,7 @@ class EffectControlLinearizer {
Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
+ Node* LowerCheckBigInt(Node* node, Node* frame_state);
Node* LowerCheckSymbol(Node* node, Node* frame_state);
void LowerCheckIf(Node* node, Node* frame_state);
Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
@@ -84,6 +88,7 @@ class EffectControlLinearizer {
Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32ToCompressedSigned(Node* node, Node* frame_state);
Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state);
Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state);
@@ -101,6 +106,9 @@ class EffectControlLinearizer {
Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
+ Node* LowerBigIntAsUintN(Node* node, Node* frame_state);
+ Node* LowerChangeUint64ToBigInt(Node* node);
+ Node* LowerTruncateBigIntToUint64(Node* node);
Node* LowerCheckedCompressedToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedCompressedToTaggedPointer(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToCompressedSigned(Node* node, Node* frame_state);
@@ -150,17 +158,20 @@ class EffectControlLinearizer {
Node* LowerStringConcat(Node* node);
Node* LowerStringToNumber(Node* node);
Node* LowerStringCharCodeAt(Node* node);
- Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding);
+ Node* LowerStringCodePointAt(Node* node);
Node* LowerStringToLowerCaseIntl(Node* node);
Node* LowerStringToUpperCaseIntl(Node* node);
Node* LowerStringFromSingleCharCode(Node* node);
Node* LowerStringFromSingleCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
Node* LowerStringSubstring(Node* node);
+ Node* LowerStringFromCodePointAt(Node* node);
Node* LowerStringLength(Node* node);
Node* LowerStringEqual(Node* node);
Node* LowerStringLessThan(Node* node);
Node* LowerStringLessThanOrEqual(Node* node);
+ Node* LowerBigIntAdd(Node* node, Node* frame_state);
+ Node* LowerBigIntNegate(Node* node);
Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
Node* LowerConvertTaggedHoleToUndefined(Node* node);
@@ -186,6 +197,7 @@ class EffectControlLinearizer {
void LowerTransitionAndStoreNumberElement(Node* node);
void LowerTransitionAndStoreNonNumberElement(Node* node);
void LowerRuntimeAbort(Node* node);
+ Node* LowerAssertType(Node* node);
Node* LowerConvertReceiver(Node* node);
Node* LowerDateNow(Node* node);
@@ -214,6 +226,7 @@ class EffectControlLinearizer {
Node* LowerStringComparison(Callable const& callable, Node* node);
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
+ Node* ChangeInt32ToCompressedSmi(Node* value);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeInt32ToIntPtr(Node* value);
Node* ChangeInt64ToSmi(Node* value);
@@ -222,6 +235,7 @@ class EffectControlLinearizer {
Node* ChangeUint32ToUintPtr(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* ChangeSmiToIntPtr(Node* value);
+ Node* ChangeCompressedSmiToInt32(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ChangeSmiToInt64(Node* value);
Node* ObjectIsSmi(Node* value);
@@ -827,6 +841,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeBitToTagged:
result = LowerChangeBitToTagged(node);
break;
+ case IrOpcode::kChangeInt31ToCompressedSigned:
+ result = LowerChangeInt31ToCompressedSigned(node);
+ break;
case IrOpcode::kChangeInt31ToTaggedSigned:
result = LowerChangeInt31ToTaggedSigned(node);
break;
@@ -848,6 +865,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeFloat64ToTaggedPointer:
result = LowerChangeFloat64ToTaggedPointer(node);
break;
+ case IrOpcode::kChangeCompressedSignedToInt32:
+ result = LowerChangeCompressedSignedToInt32(node);
+ break;
case IrOpcode::kChangeTaggedSignedToInt32:
result = LowerChangeTaggedSignedToInt32(node);
break;
@@ -911,6 +931,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckString:
result = LowerCheckString(node, frame_state);
break;
+ case IrOpcode::kCheckBigInt:
+ result = LowerCheckBigInt(node, frame_state);
+ break;
case IrOpcode::kCheckInternalizedString:
result = LowerCheckInternalizedString(node, frame_state);
break;
@@ -938,6 +961,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedInt32Mul:
result = LowerCheckedInt32Mul(node, frame_state);
break;
+ case IrOpcode::kCheckedInt32ToCompressedSigned:
+ result = LowerCheckedInt32ToCompressedSigned(node, frame_state);
+ break;
case IrOpcode::kCheckedInt32ToTaggedSigned:
result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
break;
@@ -993,6 +1019,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTaggedToTaggedPointer:
result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
break;
+ case IrOpcode::kBigIntAsUintN:
+ result = LowerBigIntAsUintN(node, frame_state);
+ break;
+ case IrOpcode::kChangeUint64ToBigInt:
+ result = LowerChangeUint64ToBigInt(node);
+ break;
+ case IrOpcode::kTruncateBigIntToUint64:
+ result = LowerTruncateBigIntToUint64(node);
+ break;
case IrOpcode::kCheckedCompressedToTaggedSigned:
result = LowerCheckedCompressedToTaggedSigned(node, frame_state);
break;
@@ -1110,6 +1145,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringIndexOf:
result = LowerStringIndexOf(node);
break;
+ case IrOpcode::kStringFromCodePointAt:
+ result = LowerStringFromCodePointAt(node);
+ break;
case IrOpcode::kStringLength:
result = LowerStringLength(node);
break;
@@ -1120,7 +1158,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerStringCharCodeAt(node);
break;
case IrOpcode::kStringCodePointAt:
- result = LowerStringCodePointAt(node, UnicodeEncodingOf(node->op()));
+ result = LowerStringCodePointAt(node);
break;
case IrOpcode::kStringToLowerCaseIntl:
result = LowerStringToLowerCaseIntl(node);
@@ -1140,6 +1178,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringLessThanOrEqual:
result = LowerStringLessThanOrEqual(node);
break;
+ case IrOpcode::kBigIntAdd:
+ result = LowerBigIntAdd(node, frame_state);
+ break;
+ case IrOpcode::kBigIntNegate:
+ result = LowerBigIntNegate(node);
+ break;
case IrOpcode::kNumberIsFloat64Hole:
result = LowerNumberIsFloat64Hole(node);
break;
@@ -1233,6 +1277,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kRuntimeAbort:
LowerRuntimeAbort(node);
break;
+ case IrOpcode::kAssertType:
+ result = LowerAssertType(node);
+ break;
case IrOpcode::kConvertReceiver:
result = LowerConvertReceiver(node);
break;
@@ -1357,6 +1404,11 @@ Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerChangeInt31ToCompressedSigned(Node* node) {
+ Node* value = node->InputAt(0);
+ return ChangeInt32ToCompressedSmi(value);
+}
+
Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
Node* value = node->InputAt(0);
return ChangeInt32ToSmi(value);
@@ -1461,6 +1513,11 @@ Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
return ChangeSmiToInt32(value);
}
+Node* EffectControlLinearizer::LowerChangeCompressedSignedToInt32(Node* node) {
+ Node* value = node->InputAt(0);
+ return ChangeCompressedSmiToInt32(value);
+}
+
Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) {
Node* value = node->InputAt(0);
return ChangeSmiToInt64(value);
@@ -1684,8 +1741,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToCompressedSigned(Node* node) {
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
vfalse = __ ChangeFloat64ToInt32(vfalse);
- vfalse = ChangeInt32ToSmi(vfalse);
- vfalse = __ ChangeTaggedSignedToCompressedSigned(vfalse);
+ vfalse = ChangeInt32ToCompressedSmi(vfalse);
__ Goto(&done, vfalse);
__ Bind(&done);
@@ -2283,6 +2339,19 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
return value;
}
+Node* EffectControlLinearizer::LowerCheckedInt32ToCompressedSigned(
+ Node* node, Node* frame_state) {
+ DCHECK(SmiValuesAre31Bits());
+ Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
+ Node* add = __ Int32AddWithOverflow(value, value);
+ Node* check = __ Projection(1, add);
+ __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
+ return __ Projection(0, add);
+}
+
Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* node, Node* frame_state) {
DCHECK(SmiValuesAre31Bits());
@@ -2651,6 +2720,121 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
return value;
}
+Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
+ // Check for Smi.
+ Node* smi_check = ObjectIsSmi(value);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), smi_check,
+ frame_state);
+
+ // Check for BigInt.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* bi_check = __ WordEqual(value_map, __ BigIntMapConstant());
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
+ bi_check, frame_state);
+
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerBigIntAsUintN(Node* node,
+ Node* frame_state) {
+ DCHECK(machine()->Is64());
+
+ const int bits = OpParameter<int>(node->op());
+ DCHECK(0 <= bits && bits <= 64);
+
+ if (bits == 64) {
+ // Reduce to nop.
+ return node->InputAt(0);
+ } else {
+ const uint64_t msk = (1ULL << bits) - 1ULL;
+ return __ Word64And(node->InputAt(0), __ Int64Constant(msk));
+ }
+}
+
+Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) {
+ DCHECK(machine()->Is64());
+
+ Node* value = node->InputAt(0);
+ Node* map = jsgraph()->HeapConstant(factory()->bigint_map());
+ // BigInts with value 0 must be of size 0 (canonical form).
+ auto if_zerodigits = __ MakeLabel();
+ auto if_onedigit = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTagged);
+
+ __ GotoIf(__ Word64Equal(value, __ IntPtrConstant(0)), &if_zerodigits);
+ __ Goto(&if_onedigit);
+
+ __ Bind(&if_onedigit);
+ {
+ Node* result = __ Allocate(AllocationType::kYoung,
+ __ IntPtrConstant(BigInt::SizeFor(1)));
+ const auto bitfield = BigInt::LengthBits::update(0, 1);
+ __ StoreField(AccessBuilder::ForMap(), result, map);
+ __ StoreField(AccessBuilder::ForBigIntBitfield(), result,
+ __ IntPtrConstant(bitfield));
+ // BigInts have no padding on 64 bit architectures with pointer compression.
+ if (BigInt::HasOptionalPadding()) {
+ __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result,
+ __ IntPtrConstant(0));
+ }
+ __ StoreField(AccessBuilder::ForBigIntLeastSignificantDigit64(), result,
+ value);
+ __ Goto(&done, result);
+ }
+
+ __ Bind(&if_zerodigits);
+ {
+ Node* result = __ Allocate(AllocationType::kYoung,
+ __ IntPtrConstant(BigInt::SizeFor(0)));
+ const auto bitfield = BigInt::LengthBits::update(0, 0);
+ __ StoreField(AccessBuilder::ForMap(), result, map);
+ __ StoreField(AccessBuilder::ForBigIntBitfield(), result,
+ __ IntPtrConstant(bitfield));
+ // BigInts have no padding on 64 bit architectures with pointer compression.
+ if (BigInt::HasOptionalPadding()) {
+ __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result,
+ __ IntPtrConstant(0));
+ }
+ __ Goto(&done, result);
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerTruncateBigIntToUint64(Node* node) {
+ DCHECK(machine()->Is64());
+
+ auto done = __ MakeLabel(MachineRepresentation::kWord64);
+ auto if_neg = __ MakeLabel();
+ auto if_not_zero = __ MakeLabel();
+
+ Node* value = node->InputAt(0);
+
+ Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
+ __ GotoIfNot(__ Word32Equal(bitfield, __ Int32Constant(0)), &if_not_zero);
+ __ Goto(&done, __ Int64Constant(0));
+
+ __ Bind(&if_not_zero);
+ {
+ Node* lsd =
+ __ LoadField(AccessBuilder::ForBigIntLeastSignificantDigit64(), value);
+ Node* sign =
+ __ Word32And(bitfield, __ Int32Constant(BigInt::SignBits::kMask));
+ __ GotoIf(__ Word32Equal(sign, __ Int32Constant(1)), &if_neg);
+ __ Goto(&done, lsd);
+
+ __ Bind(&if_neg);
+ __ Goto(&done, __ Int64Sub(__ Int64Constant(0), lsd));
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerCheckedCompressedToTaggedSigned(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
@@ -3726,16 +3910,12 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
return loop_done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerStringCodePointAt(
- Node* node, UnicodeEncoding encoding) {
+Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Builtins::Name builtin = encoding == UnicodeEncoding::UTF16
- ? Builtins::kStringCodePointAtUTF16
- : Builtins::kStringCodePointAtUTF32;
-
- Callable const callable = Builtins::CallableFor(isolate(), builtin);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringCodePointAt);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -3968,31 +4148,23 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
__ Bind(&if_not_single_code);
// Generate surrogate pair string
{
- switch (UnicodeEncodingOf(node->op())) {
- case UnicodeEncoding::UTF16:
- break;
+ // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
+ Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
- case UnicodeEncoding::UTF32: {
- // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
- Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
+ // lead = (codepoint >> 10) + LEAD_OFFSET
+ Node* lead =
+ __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
- // lead = (codepoint >> 10) + LEAD_OFFSET
- Node* lead =
- __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
+ // trail = (codepoint & 0x3FF) + 0xDC00;
+ Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
+ __ Int32Constant(0xDC00));
- // trail = (codepoint & 0x3FF) + 0xDC00;
- Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
- __ Int32Constant(0xDC00));
-
- // codpoint = (trail << 16) | lead;
+ // codepoint = (trail << 16) | lead;
#if V8_TARGET_BIG_ENDIAN
- code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail);
+ code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail);
#else
- code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
+ code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
#endif
- break;
- }
- }
// Allocate a new SeqTwoByteString for {code}.
Node* vfalse0 =
@@ -4032,6 +4204,21 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
search_string, position, __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerStringFromCodePointAt(Node* node) {
+ Node* string = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringFromCodePointAt);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
+ index, __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerStringLength(Node* node) {
Node* subject = node->InputAt(0);
@@ -4083,6 +4270,41 @@ Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringLessThanOrEqual), node);
}
+Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kBigIntAddNoThrow);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kFoldable | Operator::kNoThrow);
+ Node* value =
+ __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs,
+ rhs, __ NoContextConstant());
+
+ // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
+ __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, VectorSlotPair{},
+ ObjectIsSmi(value), frame_state);
+
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) {
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kBigIntUnaryMinus);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kFoldable | Operator::kNoThrow);
+ Node* value =
+ __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()),
+ node->InputAt(0), __ NoContextConstant());
+
+ return value;
+}
+
Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
Node* frame_state) {
// If we reach this point w/o eliminating the {node} that's marked
@@ -4256,6 +4478,11 @@ Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
return value;
}
+Node* EffectControlLinearizer::ChangeInt32ToCompressedSmi(Node* value) {
+ CHECK(machine()->Is64() && SmiValuesAre31Bits());
+ return __ Word32Shl(value, SmiShiftBitsConstant());
+}
+
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
// Do shift on 32bit values if Smis are stored in the lower word.
if (machine()->Is64() && SmiValuesAre31Bits()) {
@@ -4305,6 +4532,11 @@ Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
return ChangeSmiToIntPtr(value);
}
+Node* EffectControlLinearizer::ChangeCompressedSmiToInt32(Node* value) {
+ CHECK(machine()->Is64() && SmiValuesAre31Bits());
+ return __ Word32Sar(value, SmiShiftBitsConstant());
+}
+
Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) {
CHECK(machine()->Is64());
return ChangeSmiToIntPtr(value);
@@ -5163,6 +5395,30 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
__ Int32Constant(1), __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerAssertType(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAssertType);
+ Type type = OpParameter<Type>(node->op());
+ DCHECK(type.IsRange());
+ auto range = type.AsRange();
+
+ Node* const input = node->InputAt(0);
+ Node* const min = __ NumberConstant(range->Min());
+ Node* const max = __ NumberConstant(range->Max());
+
+ {
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kCheckNumberInRange);
+ Operator::Properties const properties = node->op()->properties();
+ CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
+ __ Call(call_descriptor, __ HeapConstant(callable.code()), input, min, max,
+ __ NoContextConstant());
+ return input;
+ }
+}
+
Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
ConvertReceiverMode const mode = ConvertReceiverModeOf(node->op());
Node* value = node->InputAt(0);
@@ -5187,7 +5443,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
__ GotoIf(check, &convert_to_object);
__ Goto(&done_convert, value);
- // Wrap the primitive {value} into a JSValue.
+ // Wrap the primitive {value} into a JSPrimitiveWrapper.
__ Bind(&convert_to_object);
Operator::Properties properties = Operator::kEliminatable;
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
@@ -5220,7 +5476,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
__ GotoIf(check, &convert_to_object);
__ Goto(&done_convert, value);
- // Wrap the primitive {value} into a JSValue.
+ // Wrap the primitive {value} into a JSPrimitiveWrapper.
__ Bind(&convert_to_object);
__ GotoIf(__ WordEqual(value, __ UndefinedConstant()),
&convert_global_proxy);
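Two of the lowerings above are plain integer arithmetic and easy to sanity-check on the side: the surrogate-pair split used by StringFromSingleCodePoint, and the checked 31-bit Smi tagging in LowerCheckedInt32ToCompressedSigned, which adds the value to itself (a shift left by one) and uses the overflow flag to reject values that need more than 31 bits. A small sketch; the overflow check uses the GCC/Clang builtin as a stand-in for Int32AddWithOverflow:

    #include <cassert>
    #include <cstdint>

    // Split a supplementary code point (> 0xFFFF) into UTF-16 lead/trail
    // units, using the same constants as the lowering above.
    void EncodeSurrogatePair(uint32_t code, uint16_t* lead, uint16_t* trail) {
      const uint32_t kLeadOffset = 0xD800 - (0x10000 >> 10);
      *lead = static_cast<uint16_t>((code >> 10) + kLeadOffset);
      *trail = static_cast<uint16_t>((code & 0x3FF) + 0xDC00);
    }

    // Checked Smi tagging for 31-bit Smis: value + value == value << 1, and
    // the overflow flag signals that the value does not fit in 31 bits.
    bool TagSmi31(int32_t value, int32_t* tagged) {
      return !__builtin_add_overflow(value, value, tagged);
    }

    int main() {
      uint16_t lead, trail;
      EncodeSurrogatePair(0x1F600, &lead, &trail);  // U+1F600
      assert(lead == 0xD83D && trail == 0xDE00);

      int32_t tagged;
      assert(TagSmi31(42, &tagged) && tagged == 84);
      assert(!TagSmi31(0x40000000, &tagged));  // 2^30 needs more than 31 bits
      return 0;
    }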
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index dc0db4d780..aee0121384 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -4,6 +4,7 @@
#include "src/compiler/escape-analysis.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
@@ -153,6 +154,7 @@ class VariableTracker {
ZoneVector<Node*> buffer_;
EffectGraphReducer* reducer_;
int next_variable_ = 0;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(VariableTracker);
};
@@ -279,12 +281,14 @@ class EscapeAnalysisTracker : public ZoneObject {
};
EffectGraphReducer::EffectGraphReducer(
- Graph* graph, std::function<void(Node*, Reduction*)> reduce, Zone* zone)
+ Graph* graph, std::function<void(Node*, Reduction*)> reduce,
+ TickCounter* tick_counter, Zone* zone)
: graph_(graph),
state_(graph, kNumStates),
revisit_(zone),
stack_(zone),
- reduce_(std::move(reduce)) {}
+ reduce_(std::move(reduce)),
+ tick_counter_(tick_counter) {}
void EffectGraphReducer::ReduceFrom(Node* node) {
// Perform DFS and eagerly trigger revisitation as soon as possible.
@@ -293,6 +297,7 @@ void EffectGraphReducer::ReduceFrom(Node* node) {
DCHECK(stack_.empty());
stack_.push({node, 0});
while (!stack_.empty()) {
+ tick_counter_->DoTick();
Node* current = stack_.top().node;
int& input_index = stack_.top().input_index;
if (input_index < current->InputCount()) {
@@ -357,7 +362,8 @@ VariableTracker::VariableTracker(JSGraph* graph, EffectGraphReducer* reducer,
graph_(graph),
table_(zone, State(zone)),
buffer_(zone),
- reducer_(reducer) {}
+ reducer_(reducer),
+ tick_counter_(reducer->tick_counter()) {}
VariableTracker::Scope::Scope(VariableTracker* states, Node* node,
Reduction* reduction)
@@ -406,6 +412,7 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
State first_input = table_.Get(NodeProperties::GetEffectInput(effect_phi, 0));
State result = first_input;
for (std::pair<Variable, Node*> var_value : first_input) {
+ tick_counter_->DoTick();
if (Node* value = var_value.second) {
Variable var = var_value.first;
TRACE("var %i:\n", var.id_);
@@ -441,10 +448,12 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
// [old_value] cannot originate from the inputs. Thus [old_value]
// must have been created by a previous reduction of this [effect_phi].
for (int i = 0; i < arity; ++i) {
- NodeProperties::ReplaceValueInput(
- old_value, buffer_[i] ? buffer_[i] : graph_->Dead(), i);
- // This change cannot affect the rest of the reducer, so there is no
- // need to trigger additional revisitations.
+ Node* old_input = NodeProperties::GetValueInput(old_value, i);
+ Node* new_input = buffer_[i] ? buffer_[i] : graph_->Dead();
+ if (old_input != new_input) {
+ NodeProperties::ReplaceValueInput(old_value, new_input, i);
+ reducer_->Revisit(old_value);
+ }
}
result.Set(var, old_value);
} else {
@@ -701,21 +710,19 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
} else if (right_object && !right_object->HasEscaped()) {
replacement = jsgraph->FalseConstant();
}
- if (replacement) {
- // TODO(tebbi) This is a workaround for uninhabited types. If we
- // replaced a value of uninhabited type with a constant, we would
- // widen the type of the node. This could produce inconsistent
- // types (which might confuse representation selection). We get
- // around this by refusing to constant-fold and escape-analyze
- // if the type is not inhabited.
- if (!NodeProperties::GetType(left).IsNone() &&
- !NodeProperties::GetType(right).IsNone()) {
- current->SetReplacement(replacement);
- } else {
- current->SetEscaped(left);
- current->SetEscaped(right);
- }
+ // TODO(tebbi) This is a workaround for uninhabited types. If we
+ // replaced a value of uninhabited type with a constant, we would
+ // widen the type of the node. This could produce inconsistent
+ // types (which might confuse representation selection). We get
+ // around this by refusing to constant-fold and escape-analyze
+ // if the type is not inhabited.
+ if (replacement && !NodeProperties::GetType(left).IsNone() &&
+ !NodeProperties::GetType(right).IsNone()) {
+ current->SetReplacement(replacement);
+ break;
}
+ current->SetEscaped(left);
+ current->SetEscaped(right);
break;
}
case IrOpcode::kCheckMaps: {
@@ -817,11 +824,12 @@ void EscapeAnalysis::Reduce(Node* node, Reduction* reduction) {
ReduceNode(op, &current, jsgraph());
}
-EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, Zone* zone)
+EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, TickCounter* tick_counter,
+ Zone* zone)
: EffectGraphReducer(
jsgraph->graph(),
[this](Node* node, Reduction* reduction) { Reduce(node, reduction); },
- zone),
+ tick_counter, zone),
tracker_(new (zone) EscapeAnalysisTracker(jsgraph, this, zone)),
jsgraph_(jsgraph) {}
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index c3dcd2f74d..0fbc7d0bdd 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -14,6 +14,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
class CommonOperatorBuilder;
@@ -38,7 +41,8 @@ class EffectGraphReducer {
};
EffectGraphReducer(Graph* graph,
- std::function<void(Node*, Reduction*)> reduce, Zone* zone);
+ std::function<void(Node*, Reduction*)> reduce,
+ TickCounter* tick_counter, Zone* zone);
void ReduceGraph() { ReduceFrom(graph_->end()); }
@@ -56,6 +60,8 @@ class EffectGraphReducer {
bool Complete() { return stack_.empty() && revisit_.empty(); }
+ TickCounter* tick_counter() const { return tick_counter_; }
+
private:
struct NodeState {
Node* node;
@@ -69,6 +75,7 @@ class EffectGraphReducer {
ZoneStack<Node*> revisit_;
ZoneStack<NodeState> stack_;
std::function<void(Node*, Reduction*)> reduce_;
+ TickCounter* const tick_counter_;
};
// A variable is an abstract storage location, which is lowered to SSA values
@@ -164,7 +171,7 @@ class EscapeAnalysisResult {
class V8_EXPORT_PRIVATE EscapeAnalysis final
: public NON_EXPORTED_BASE(EffectGraphReducer) {
public:
- EscapeAnalysis(JSGraph* jsgraph, Zone* zone);
+ EscapeAnalysis(JSGraph* jsgraph, TickCounter* tick_counter, Zone* zone);
EscapeAnalysisResult analysis_result() {
DCHECK(Complete());
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index cc9dbd9dfd..50f29d968b 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -52,6 +52,9 @@ Node* GraphAssembler::HeapConstant(Handle<HeapObject> object) {
return jsgraph()->HeapConstant(object);
}
+Node* GraphAssembler::NumberConstant(double value) {
+ return jsgraph()->Constant(value);
+}
Node* GraphAssembler::ExternalConstant(ExternalReference ref) {
return jsgraph()->ExternalConstant(ref);
@@ -221,6 +224,12 @@ Node* GraphAssembler::BitcastTaggedToWord(Node* value) {
current_effect_, current_control_);
}
+Node* GraphAssembler::BitcastTaggedSignedToWord(Node* value) {
+ return current_effect_ =
+ graph()->NewNode(machine()->BitcastTaggedSignedToWord(), value,
+ current_effect_, current_control_);
+}
+
Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
return current_effect_ =
graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value,
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 74b885b788..e2c0005d15 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -57,6 +57,7 @@ namespace compiler {
V(Word32Shr) \
V(Word32Shl) \
V(Word32Sar) \
+ V(Word64And) \
V(IntAdd) \
V(IntSub) \
V(IntMul) \
@@ -71,6 +72,7 @@ namespace compiler {
V(Uint64LessThan) \
V(Uint64LessThanOrEqual) \
V(Int32LessThan) \
+ V(Int64Sub) \
V(Float64Add) \
V(Float64Sub) \
V(Float64Div) \
@@ -93,22 +95,24 @@ namespace compiler {
V(Uint32Mod) \
V(Uint32Div)
-#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
- V(TrueConstant) \
- V(FalseConstant) \
- V(NullConstant) \
- V(BigIntMapConstant) \
- V(BooleanMapConstant) \
- V(HeapNumberMapConstant) \
- V(NoContextConstant) \
- V(EmptyStringConstant) \
- V(UndefinedConstant) \
- V(TheHoleConstant) \
- V(FixedArrayMapConstant) \
- V(FixedDoubleArrayMapConstant) \
- V(ToNumberBuiltinConstant) \
- V(AllocateInYoungGenerationStubConstant) \
- V(AllocateInOldGenerationStubConstant)
+#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
+ V(TrueConstant) \
+ V(FalseConstant) \
+ V(NullConstant) \
+ V(BigIntMapConstant) \
+ V(BooleanMapConstant) \
+ V(HeapNumberMapConstant) \
+ V(NoContextConstant) \
+ V(EmptyStringConstant) \
+ V(UndefinedConstant) \
+ V(TheHoleConstant) \
+ V(FixedArrayMapConstant) \
+ V(FixedDoubleArrayMapConstant) \
+ V(ToNumberBuiltinConstant) \
+ V(AllocateInYoungGenerationStubConstant) \
+ V(AllocateRegularInYoungGenerationStubConstant) \
+ V(AllocateInOldGenerationStubConstant) \
+ V(AllocateRegularInOldGenerationStubConstant)
class GraphAssembler;
@@ -196,6 +200,7 @@ class GraphAssembler {
Node* Float64Constant(double value);
Node* Projection(int index, Node* value);
Node* HeapConstant(Handle<HeapObject> object);
+ Node* NumberConstant(double value);
Node* CEntryStubConstant(int result_size);
Node* ExternalConstant(ExternalReference ref);
@@ -225,6 +230,7 @@ class GraphAssembler {
Node* ToNumber(Node* value);
Node* BitcastWordToTagged(Node* value);
Node* BitcastTaggedToWord(Node* value);
+ Node* BitcastTaggedSignedToWord(Node* value);
Node* Allocate(AllocationType allocation, Node* size);
Node* LoadField(FieldAccess const&, Node* object);
Node* LoadElement(ElementAccess const&, Node* object, Node* index);
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index fafa322d87..9a0dea6b26 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -5,10 +5,11 @@
#include <functional>
#include <limits>
-#include "src/compiler/graph.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/node.h"
+#include "src/compiler/graph.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/verifier.h"
namespace v8 {
@@ -25,13 +26,15 @@ enum class GraphReducer::State : uint8_t {
void Reducer::Finalize() {}
-GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
+GraphReducer::GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
+ Node* dead)
: graph_(graph),
dead_(dead),
state_(graph, 4),
reducers_(zone),
revisit_(zone),
- stack_(zone) {
+ stack_(zone),
+ tick_counter_(tick_counter) {
if (dead != nullptr) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -82,6 +85,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
auto skip = reducers_.end();
for (auto i = reducers_.begin(); i != reducers_.end();) {
if (i != skip) {
+ tick_counter_->DoTick();
Reduction reduction = (*i)->Reduce(node);
if (!reduction.Changed()) {
// No change from this reducer.
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 3bb20a4625..bbcc67b074 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -12,13 +12,15 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
class Graph;
class Node;
-
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
using NodeId = uint32_t;
@@ -129,7 +131,8 @@ class AdvancedReducer : public Reducer {
class V8_EXPORT_PRIVATE GraphReducer
: public NON_EXPORTED_BASE(AdvancedReducer::Editor) {
public:
- GraphReducer(Zone* zone, Graph* graph, Node* dead = nullptr);
+ GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
+ Node* dead = nullptr);
~GraphReducer() override;
Graph* graph() const { return graph_; }
@@ -181,6 +184,7 @@ class V8_EXPORT_PRIVATE GraphReducer
ZoneVector<Reducer*> reducers_;
ZoneQueue<Node*> revisit_;
ZoneStack<NodeState> stack_;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(GraphReducer);
};
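Editorial note: the changes to EffectGraphReducer and GraphReducer above thread a TickCounter through the constructors and call DoTick() once per processed node in the reduction loops. A rough sketch of that pattern, with a simplified counter standing in for the real src/codegen/tick-counter.h class (only DoTick() is taken from the diff; everything else here is an assumption), is:

    #include <cstddef>
    #include <vector>

    // Simplified stand-in for v8::internal::TickCounter.
    class SimpleTickCounter {
     public:
      void DoTick() { ++ticks_; }
      size_t ticks() const { return ticks_; }
     private:
      size_t ticks_ = 0;
    };

    // The reducer loops tick once per node so long-running graph
    // reductions report forward progress to their owner.
    template <typename Node, typename ReduceFn>
    void ReduceAll(const std::vector<Node>& nodes, ReduceFn reduce,
                   SimpleTickCounter* tick_counter) {
      for (const Node& node : nodes) {
        tick_counter->DoTick();
        reduce(node);
      }
    }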
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
new file mode 100644
index 0000000000..5547039fa6
--- /dev/null
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -0,0 +1,906 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_HEAP_REFS_H_
+#define V8_COMPILER_HEAP_REFS_H_
+
+#include "src/base/optional.h"
+#include "src/ic/call-optimization.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/instance-type.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+class CallHandlerInfo;
+class FixedDoubleArray;
+class FunctionTemplateInfo;
+class HeapNumber;
+class InternalizedString;
+class JSBoundFunction;
+class JSDataView;
+class JSGlobalProxy;
+class JSRegExp;
+class JSTypedArray;
+class NativeContext;
+class ScriptContextTable;
+class VectorSlotPair;
+
+namespace compiler {
+
+// Whether we are loading a property or storing to a property.
+// For a store during literal creation, do not walk up the prototype chain.
+enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
+
+enum class OddballType : uint8_t {
+ kNone, // Not an Oddball.
+ kBoolean, // True or False.
+ kUndefined,
+ kNull,
+ kHole,
+ kUninitialized,
+ kOther // Oddball, but none of the above.
+};
+
+// This list is sorted such that subtypes appear before their supertypes.
+// DO NOT VIOLATE THIS PROPERTY!
+#define HEAP_BROKER_OBJECT_LIST(V) \
+ /* Subtypes of JSObject */ \
+ V(JSArray) \
+ V(JSBoundFunction) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSGlobalProxy) \
+ V(JSRegExp) \
+ V(JSTypedArray) \
+ /* Subtypes of Context */ \
+ V(NativeContext) \
+ /* Subtypes of FixedArray */ \
+ V(Context) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ /* Subtypes of FixedArrayBase */ \
+ V(BytecodeArray) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ /* Subtypes of Name */ \
+ V(InternalizedString) \
+ V(String) \
+ V(Symbol) \
+ /* Subtypes of HeapObject */ \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(CallHandlerInfo) \
+ V(Cell) \
+ V(Code) \
+ V(DescriptorArray) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArrayBase) \
+ V(FunctionTemplateInfo) \
+ V(HeapNumber) \
+ V(JSObject) \
+ V(Map) \
+ V(MutableHeapNumber) \
+ V(Name) \
+ V(PropertyCell) \
+ V(SharedFunctionInfo) \
+ V(SourceTextModule) \
+ /* Subtypes of Object */ \
+ V(HeapObject)
+
+class CompilationDependencies;
+class JSHeapBroker;
+class ObjectData;
+class PerIsolateCompilerCache;
+class PropertyAccessInfo;
+#define FORWARD_DECL(Name) class Name##Ref;
+HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
+#undef FORWARD_DECL
+
+class V8_EXPORT_PRIVATE ObjectRef {
+ public:
+ ObjectRef(JSHeapBroker* broker, Handle<Object> object);
+ ObjectRef(JSHeapBroker* broker, ObjectData* data)
+ : data_(data), broker_(broker) {
+ CHECK_NOT_NULL(data_);
+ }
+
+ Handle<Object> object() const;
+
+ bool equals(const ObjectRef& other) const;
+
+ bool IsSmi() const;
+ int AsSmi() const;
+
+#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
+ HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+#undef HEAP_IS_METHOD_DECL
+
+#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
+ HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+#undef HEAP_AS_METHOD_DECL
+
+ bool IsNullOrUndefined() const;
+
+ bool BooleanValue() const;
+ Maybe<double> OddballToNumber() const;
+
+ // Return the element at key {index} if {index} is known to be an own data
+ // property of the object that is non-writable and non-configurable.
+ base::Optional<ObjectRef> GetOwnConstantElement(uint32_t index,
+ bool serialize = false) const;
+
+ Isolate* isolate() const;
+
+ struct Hash {
+ size_t operator()(const ObjectRef& ref) const {
+ return base::hash_combine(ref.object().address());
+ }
+ };
+ struct Equal {
+ bool operator()(const ObjectRef& lhs, const ObjectRef& rhs) const {
+ return lhs.equals(rhs);
+ }
+ };
+
+ protected:
+ JSHeapBroker* broker() const;
+ ObjectData* data() const;
+ ObjectData* data_; // Should be used only by object() getters.
+
+ private:
+ friend class FunctionTemplateInfoRef;
+ friend class JSArrayData;
+ friend class JSGlobalProxyRef;
+ friend class JSGlobalProxyData;
+ friend class JSObjectData;
+ friend class StringData;
+
+ friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
+
+ JSHeapBroker* broker_;
+};
+
+// Temporary class that carries information from a Map. We'd like to remove
+// this class and use MapRef instead, but we can't as long as we support the
+// kDisabled broker mode. That's because obtaining the MapRef via
+// HeapObjectRef::map() requires a HandleScope when the broker is disabled.
+// During OptimizeGraph we generally don't have a HandleScope, however. There
+// are two places where we therefore use GetHeapObjectType() instead. Both that
+// function and this class should eventually be removed.
+class HeapObjectType {
+ public:
+ enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
+
+ using Flags = base::Flags<Flag>;
+
+ HeapObjectType(InstanceType instance_type, Flags flags,
+ OddballType oddball_type)
+ : instance_type_(instance_type),
+ oddball_type_(oddball_type),
+ flags_(flags) {
+ DCHECK_EQ(instance_type == ODDBALL_TYPE,
+ oddball_type != OddballType::kNone);
+ }
+
+ OddballType oddball_type() const { return oddball_type_; }
+ InstanceType instance_type() const { return instance_type_; }
+ Flags flags() const { return flags_; }
+
+ bool is_callable() const { return flags_ & kCallable; }
+ bool is_undetectable() const { return flags_ & kUndetectable; }
+
+ private:
+ InstanceType const instance_type_;
+ OddballType const oddball_type_;
+ Flags const flags_;
+};
+
+class HeapObjectRef : public ObjectRef {
+ public:
+ using ObjectRef::ObjectRef;
+ Handle<HeapObject> object() const;
+
+ MapRef map() const;
+
+ // See the comment on the HeapObjectType class.
+ HeapObjectType GetHeapObjectType() const;
+};
+
+class PropertyCellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<PropertyCell> object() const;
+
+ PropertyDetails property_details() const;
+
+ void Serialize();
+ ObjectRef value() const;
+};
+
+class JSObjectRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<JSObject> object() const;
+
+ uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
+ double RawFastDoublePropertyAt(FieldIndex index) const;
+ ObjectRef RawFastPropertyAt(FieldIndex index) const;
+
+ // Return the value of the property identified by the field {index}
+ // if {index} is known to be an own data property of the object.
+ base::Optional<ObjectRef> GetOwnProperty(Representation field_representation,
+ FieldIndex index,
+ bool serialize = false) const;
+
+ FixedArrayBaseRef elements() const;
+ void SerializeElements();
+ void EnsureElementsTenured();
+ ElementsKind GetElementsKind() const;
+
+ void SerializeObjectCreateMap();
+ base::Optional<MapRef> GetObjectCreateMap() const;
+};
+
+class JSDataViewRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSDataView> object() const;
+
+ size_t byte_length() const;
+ size_t byte_offset() const;
+};
+
+class JSBoundFunctionRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSBoundFunction> object() const;
+
+ void Serialize();
+
+ // The following are available only after calling Serialize().
+ ObjectRef bound_target_function() const;
+ ObjectRef bound_this() const;
+ FixedArrayRef bound_arguments() const;
+};
+
+class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSFunction> object() const;
+
+ bool has_feedback_vector() const;
+ bool has_initial_map() const;
+ bool has_prototype() const;
+ bool PrototypeRequiresRuntimeLookup() const;
+
+ void Serialize();
+ bool serialized() const;
+
+ // The following are available only after calling Serialize().
+ ObjectRef prototype() const;
+ MapRef initial_map() const;
+ ContextRef context() const;
+ NativeContextRef native_context() const;
+ SharedFunctionInfoRef shared() const;
+ FeedbackVectorRef feedback_vector() const;
+ int InitialMapInstanceSizeWithMinSlack() const;
+
+ bool IsSerializedForCompilation() const;
+};
+
+class JSRegExpRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSRegExp> object() const;
+
+ ObjectRef raw_properties_or_hash() const;
+ ObjectRef data() const;
+ ObjectRef source() const;
+ ObjectRef flags() const;
+ ObjectRef last_index() const;
+};
+
+class HeapNumberRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<HeapNumber> object() const;
+
+ double value() const;
+};
+
+class MutableHeapNumberRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<MutableHeapNumber> object() const;
+
+ double value() const;
+};
+
+class ContextRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Context> object() const;
+
+ // {previous} decrements {depth} by 1 for each previous link successfully
+ // followed. If {depth} != 0 on function return, then it only got
+ // partway to the desired depth. If {serialize} is true, then
+ // {previous} will cache its findings.
+ ContextRef previous(size_t* depth, bool serialize = false) const;
+
+ // Only returns a value if the index is valid for this ContextRef.
+ base::Optional<ObjectRef> get(int index, bool serialize = false) const;
+
+ // We only serialize the ScopeInfo if certain Promise
+ // builtins are called.
+ void SerializeScopeInfo();
+ base::Optional<ScopeInfoRef> scope_info() const;
+};
+
+#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ V(JSFunction, array_function) \
+ V(JSFunction, boolean_function) \
+ V(JSFunction, bigint_function) \
+ V(JSFunction, number_function) \
+ V(JSFunction, object_function) \
+ V(JSFunction, promise_function) \
+ V(JSFunction, promise_then) \
+ V(JSFunction, string_function) \
+ V(JSFunction, symbol_function) \
+ V(JSGlobalProxy, global_proxy_object) \
+ V(JSObject, promise_prototype) \
+ V(Map, bound_function_with_constructor_map) \
+ V(Map, bound_function_without_constructor_map) \
+ V(Map, fast_aliased_arguments_map) \
+ V(Map, initial_array_iterator_map) \
+ V(Map, initial_string_iterator_map) \
+ V(Map, iterator_result_map) \
+ V(Map, js_array_holey_double_elements_map) \
+ V(Map, js_array_holey_elements_map) \
+ V(Map, js_array_holey_smi_elements_map) \
+ V(Map, js_array_packed_double_elements_map) \
+ V(Map, js_array_packed_elements_map) \
+ V(Map, js_array_packed_smi_elements_map) \
+ V(Map, sloppy_arguments_map) \
+ V(Map, slow_object_with_null_prototype_map) \
+ V(Map, strict_arguments_map) \
+ V(ScriptContextTable, script_context_table) \
+ V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \
+ V(SharedFunctionInfo, promise_catch_finally_shared_fun) \
+ V(SharedFunctionInfo, promise_then_finally_shared_fun) \
+ V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun)
+
+// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have
+// happened when Turbofan is invoked via --always-opt.
+#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
+ V(Map, async_function_object_map) \
+ V(Map, map_key_iterator_map) \
+ V(Map, map_key_value_iterator_map) \
+ V(Map, map_value_iterator_map) \
+ V(JSFunction, regexp_exec_function) \
+ V(Map, set_key_value_iterator_map) \
+ V(Map, set_value_iterator_map)
+
+#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V)
+
+class NativeContextRef : public ContextRef {
+ public:
+ using ContextRef::ContextRef;
+ Handle<NativeContext> object() const;
+
+ void Serialize();
+
+#define DECL_ACCESSOR(type, name) type##Ref name() const;
+ BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
+
+ ScopeInfoRef scope_info() const;
+ MapRef GetFunctionMapFromIndex(int index) const;
+ MapRef GetInitialJSArrayMap(ElementsKind kind) const;
+ base::Optional<JSFunctionRef> GetConstructorFunction(const MapRef& map) const;
+};
+
+class NameRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Name> object() const;
+
+ bool IsUniqueName() const;
+};
+
+class ScriptContextTableRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<ScriptContextTable> object() const;
+
+ struct LookupResult {
+ ContextRef context;
+ bool immutable;
+ int index;
+ };
+
+ base::Optional<LookupResult> lookup(const NameRef& name) const;
+};
+
+class DescriptorArrayRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<DescriptorArray> object() const;
+};
+
+class FeedbackCellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FeedbackCell> object() const;
+
+ HeapObjectRef value() const;
+};
+
+class FeedbackVectorRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FeedbackVector> object() const;
+
+ ObjectRef get(FeedbackSlot slot) const;
+
+ void SerializeSlots();
+};
+
+class CallHandlerInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<CallHandlerInfo> object() const;
+
+ Address callback() const;
+
+ void Serialize();
+ ObjectRef data() const;
+};
+
+class AllocationSiteRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<AllocationSite> object() const;
+
+ bool PointsToLiteral() const;
+ AllocationType GetAllocationType() const;
+ ObjectRef nested_site() const;
+
+ // {IsFastLiteral} determines whether the given array or object literal
+ // boilerplate satisfies all limits to be considered for fast deep-copying
+ // and computes the total size of all objects that are part of the graph.
+ //
+ // If PointsToLiteral() is false, then IsFastLiteral() is also false.
+ bool IsFastLiteral() const;
+ // We only serialize boilerplate if IsFastLiteral is true.
+ base::Optional<JSObjectRef> boilerplate() const;
+
+ ElementsKind GetElementsKind() const;
+ bool CanInlineCall() const;
+};
+
+class BigIntRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<BigInt> object() const;
+
+ uint64_t AsUint64() const;
+};
+
+class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Map> object() const;
+
+ int instance_size() const;
+ InstanceType instance_type() const;
+ int GetInObjectProperties() const;
+ int GetInObjectPropertiesStartInWords() const;
+ int NumberOfOwnDescriptors() const;
+ int GetInObjectPropertyOffset(int index) const;
+ int constructor_function_index() const;
+ int NextFreePropertyIndex() const;
+ int UnusedPropertyFields() const;
+ ElementsKind elements_kind() const;
+ bool is_stable() const;
+ bool is_extensible() const;
+ bool is_constructor() const;
+ bool has_prototype_slot() const;
+ bool is_access_check_needed() const;
+ bool is_deprecated() const;
+ bool CanBeDeprecated() const;
+ bool CanTransition() const;
+ bool IsInobjectSlackTrackingInProgress() const;
+ bool is_dictionary_map() const;
+ bool IsFixedCowArrayMap() const;
+ bool IsPrimitiveMap() const;
+ bool is_undetectable() const;
+ bool is_callable() const;
+ bool has_indexed_interceptor() const;
+ bool is_migration_target() const;
+ bool supports_fast_array_iteration() const;
+ bool supports_fast_array_resize() const;
+ bool IsMapOfCurrentGlobalProxy() const;
+
+ OddballType oddball_type() const;
+
+#define DEF_TESTER(Type, ...) bool Is##Type##Map() const;
+ INSTANCE_TYPE_CHECKERS(DEF_TESTER)
+#undef DEF_TESTER
+
+ void SerializeBackPointer();
+ HeapObjectRef GetBackPointer() const;
+
+ void SerializePrototype();
+ bool serialized_prototype() const;
+ HeapObjectRef prototype() const;
+
+ void SerializeForElementLoad();
+
+ void SerializeForElementStore();
+ bool HasOnlyStablePrototypesWithFastElements(
+ ZoneVector<MapRef>* prototype_maps);
+
+ // Concerning the underlying instance_descriptors:
+ void SerializeOwnDescriptors();
+ void SerializeOwnDescriptor(int descriptor_index);
+ MapRef FindFieldOwner(int descriptor_index) const;
+ PropertyDetails GetPropertyDetails(int descriptor_index) const;
+ NameRef GetPropertyKey(int descriptor_index) const;
+ FieldIndex GetFieldIndexFor(int descriptor_index) const;
+ ObjectRef GetFieldType(int descriptor_index) const;
+ bool IsUnboxedDoubleField(int descriptor_index) const;
+
+ // Available after calling JSFunctionRef::Serialize on a function that has
+ // this map as initial map.
+ ObjectRef GetConstructor() const;
+ base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
+};
+
+struct HolderLookupResult {
+ HolderLookupResult(CallOptimization::HolderLookup lookup_ =
+ CallOptimization::kHolderNotFound,
+ base::Optional<JSObjectRef> holder_ = base::nullopt)
+ : lookup(lookup_), holder(holder_) {}
+ CallOptimization::HolderLookup lookup;
+ base::Optional<JSObjectRef> holder;
+};
+
+class FunctionTemplateInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FunctionTemplateInfo> object() const;
+
+ bool is_signature_undefined() const;
+ bool accept_any_receiver() const;
+ // The following returns true if the CallHandlerInfo is present.
+ bool has_call_code() const;
+
+ void SerializeCallCode();
+ base::Optional<CallHandlerInfoRef> call_code() const;
+
+ HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map,
+ bool serialize);
+};
+
+class FixedArrayBaseRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FixedArrayBase> object() const;
+
+ int length() const;
+};
+
+class FixedArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<FixedArray> object() const;
+
+ ObjectRef get(int i) const;
+};
+
+class FixedDoubleArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<FixedDoubleArray> object() const;
+
+ double get_scalar(int i) const;
+ bool is_the_hole(int i) const;
+};
+
+class BytecodeArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<BytecodeArray> object() const;
+
+ int register_count() const;
+ int parameter_count() const;
+ interpreter::Register incoming_new_target_or_generator_register() const;
+
+ // Bytecode access methods.
+ uint8_t get(int index) const;
+ Address GetFirstBytecodeAddress() const;
+
+ // Source position table.
+ const byte* source_positions_address() const;
+ int source_positions_size() const;
+
+ // Constant pool access.
+ Handle<Object> GetConstantAtIndex(int index) const;
+ bool IsConstantAtIndexSmi(int index) const;
+ Smi GetConstantAtIndexAsSmi(int index) const;
+
+ // Exception handler table.
+ Address handler_table_address() const;
+ int handler_table_size() const;
+
+ bool IsSerializedForCompilation() const;
+ void SerializeForCompilation();
+};
+
+class JSArrayRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSArray> object() const;
+
+ ObjectRef length() const;
+
+ // Return the element at key {index} if the array has a copy-on-write elements
+ // storage and {index} is known to be an own data property.
+ base::Optional<ObjectRef> GetOwnCowElement(uint32_t index,
+ bool serialize = false) const;
+};
+
+class ScopeInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<ScopeInfo> object() const;
+
+ int ContextLength() const;
+};
+
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(bool, is_safe_to_skip_arguments_adaptor) \
+ V(bool, IsInlineable) \
+ V(int, StartPosition) \
+ V(bool, is_compiled)
+
+class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<SharedFunctionInfo> object() const;
+
+ int builtin_id() const;
+ BytecodeArrayRef GetBytecodeArray() const;
+
+#define DECL_ACCESSOR(type, name) type name() const;
+ BROKER_SFI_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
+
+ bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
+ void SetSerializedForCompilation(FeedbackVectorRef feedback);
+
+ // Template objects may not be created at compilation time. This method
+ // wraps the retrieval of the template object and creates it if
+ // necessary.
+ JSArrayRef GetTemplateObject(ObjectRef description, FeedbackVectorRef vector,
+ FeedbackSlot slot, bool serialize = false);
+
+ void SerializeFunctionTemplateInfo();
+ base::Optional<FunctionTemplateInfoRef> function_template_info() const;
+};
+
+class StringRef : public NameRef {
+ public:
+ using NameRef::NameRef;
+ Handle<String> object() const;
+
+ int length() const;
+ uint16_t GetFirstChar();
+ base::Optional<double> ToNumber();
+ bool IsSeqString() const;
+ bool IsExternalString() const;
+};
+
+class SymbolRef : public NameRef {
+ public:
+ using NameRef::NameRef;
+ Handle<Symbol> object() const;
+};
+
+class JSTypedArrayRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSTypedArray> object() const;
+
+ bool is_on_heap() const;
+ size_t length() const;
+ void* external_pointer() const;
+
+ void Serialize();
+ bool serialized() const;
+
+ HeapObjectRef buffer() const;
+};
+
+class SourceTextModuleRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<SourceTextModule> object() const;
+
+ void Serialize();
+
+ CellRef GetCell(int cell_index) const;
+};
+
+class CellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Cell> object() const;
+
+ ObjectRef value() const;
+};
+
+class JSGlobalProxyRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSGlobalProxy> object() const;
+
+ // If {serialize} is false:
+ // If the property is known to exist as a property cell (on the global
+ // object), return that property cell. Otherwise (not known to exist as a
+ // property cell or known not to exist as a property cell) return nothing.
+ // If {serialize} is true:
+ // Like above but potentially access the heap and serialize the necessary
+ // information.
+ base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name,
+ bool serialize = false) const;
+};
+
+class CodeRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Code> object() const;
+};
+
+class InternalizedStringRef : public StringRef {
+ public:
+ using StringRef::StringRef;
+ Handle<InternalizedString> object() const;
+};
+
+class ElementAccessFeedback;
+class NamedAccessFeedback;
+
+class ProcessedFeedback : public ZoneObject {
+ public:
+ enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess };
+ Kind kind() const { return kind_; }
+
+ ElementAccessFeedback const* AsElementAccess() const;
+ NamedAccessFeedback const* AsNamedAccess() const;
+
+ protected:
+ explicit ProcessedFeedback(Kind kind) : kind_(kind) {}
+
+ private:
+ Kind const kind_;
+};
+
+class InsufficientFeedback final : public ProcessedFeedback {
+ public:
+ InsufficientFeedback();
+};
+
+class GlobalAccessFeedback : public ProcessedFeedback {
+ public:
+ explicit GlobalAccessFeedback(PropertyCellRef cell);
+ GlobalAccessFeedback(ContextRef script_context, int slot_index,
+ bool immutable);
+
+ bool IsPropertyCell() const;
+ PropertyCellRef property_cell() const;
+
+ bool IsScriptContextSlot() const { return !IsPropertyCell(); }
+ ContextRef script_context() const;
+ int slot_index() const;
+ bool immutable() const;
+
+ base::Optional<ObjectRef> GetConstantHint() const;
+
+ private:
+ ObjectRef const cell_or_context_;
+ int const index_and_immutable_;
+};
+
+class KeyedAccessMode {
+ public:
+ static KeyedAccessMode FromNexus(FeedbackNexus const& nexus);
+
+ AccessMode access_mode() const;
+ bool IsLoad() const;
+ bool IsStore() const;
+ KeyedAccessLoadMode load_mode() const;
+ KeyedAccessStoreMode store_mode() const;
+
+ private:
+ AccessMode const access_mode_;
+ union LoadStoreMode {
+ LoadStoreMode(KeyedAccessLoadMode load_mode);
+ LoadStoreMode(KeyedAccessStoreMode store_mode);
+ KeyedAccessLoadMode load_mode;
+ KeyedAccessStoreMode store_mode;
+ } const load_store_mode_;
+
+ KeyedAccessMode(AccessMode access_mode, KeyedAccessLoadMode load_mode);
+ KeyedAccessMode(AccessMode access_mode, KeyedAccessStoreMode store_mode);
+};
+
+class ElementAccessFeedback : public ProcessedFeedback {
+ public:
+ ElementAccessFeedback(Zone* zone, KeyedAccessMode const& keyed_mode);
+
+ // No transition sources appear in {receiver_maps}.
+ // All transition targets appear in {receiver_maps}.
+ ZoneVector<Handle<Map>> receiver_maps;
+ ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions;
+
+ KeyedAccessMode const keyed_mode;
+
+ class MapIterator {
+ public:
+ bool done() const;
+ void advance();
+ MapRef current() const;
+
+ private:
+ friend class ElementAccessFeedback;
+
+ explicit MapIterator(ElementAccessFeedback const& processed,
+ JSHeapBroker* broker);
+
+ ElementAccessFeedback const& processed_;
+ JSHeapBroker* const broker_;
+ size_t index_ = 0;
+ };
+
+ // Iterator over all maps: first {receiver_maps}, then transition sources.
+ MapIterator all_maps(JSHeapBroker* broker) const;
+};
+
+class NamedAccessFeedback : public ProcessedFeedback {
+ public:
+ NamedAccessFeedback(NameRef const& name,
+ ZoneVector<PropertyAccessInfo> const& access_infos);
+
+ NameRef const& name() const { return name_; }
+ ZoneVector<PropertyAccessInfo> const& access_infos() const {
+ return access_infos_;
+ }
+
+ private:
+ NameRef const name_;
+ ZoneVector<PropertyAccessInfo> const access_infos_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_HEAP_REFS_H_
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 3430b6b339..eda866e5f2 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -97,7 +97,10 @@ int GetReturnCountAfterLowering(CallDescriptor* call_descriptor) {
int GetParameterIndexAfterLowering(
Signature<MachineRepresentation>* signature, int old_index) {
int result = old_index;
- for (int i = 0; i < old_index; i++) {
+ // Be robust towards special indexes (>= param count).
+ int max_to_check =
+ std::min(old_index, static_cast<int>(signature->parameter_count()));
+ for (int i = 0; i < max_to_check; i++) {
if (signature->GetParam(i) == MachineRepresentation::kWord64) {
result++;
}
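Editorial note: GetParameterIndexAfterLowering shifts a parameter index right by one for every earlier kWord64 parameter, since each such parameter is split into a low and a high word; the added std::min clamp keeps special indexes at or beyond the parameter count from reading past the signature. A standalone sketch of the same computation, with MachineRepresentation reduced to a bool is_word64 flag as an assumption, is:

    #include <algorithm>
    #include <vector>

    // Every earlier 64-bit parameter occupies two slots after lowering,
    // so later indexes shift right by one per such parameter.
    int ParameterIndexAfterLoweringSketch(const std::vector<bool>& is_word64,
                                          int old_index) {
      int result = old_index;
      // Clamp like the patched code: special indexes (>= param count)
      // must not index past the signature.
      int max_to_check =
          std::min(old_index, static_cast<int>(is_word64.size()));
      for (int i = 0; i < max_to_check; i++) {
        if (is_word64[i]) result++;
      }
      return result;
    }
    // Example: params {i64, i32, i64}; the parameter at old index 2 moves
    // to index 3, because the i64 at index 0 now takes two slots.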
@@ -142,16 +145,16 @@ int Int64Lowering::GetParameterCountAfterLowering(
signature, static_cast<int>(signature->parameter_count()));
}
-void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
- Node*& index_high) {
+void Int64Lowering::GetIndexNodes(Node* index, Node** index_low,
+ Node** index_high) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- index_low = index;
- index_high = graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(4)));
+ *index_low = index;
+ *index_high = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
#elif defined(V8_TARGET_BIG_ENDIAN)
- index_low = graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(4)));
- index_high = index;
+ *index_low = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
+ *index_high = index;
#endif
}
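Editorial note: the hunk above only changes GetIndexNodes to return its results through out-pointers instead of non-const references; the values are unchanged. On little-endian targets the low word lives at the original index and the high word four bytes above it, and on big-endian targets the two are swapped. A hedged scalar sketch of that offset computation, with a bool standing in for the V8_TARGET_LITTLE_ENDIAN macro, is:

    // Byte offsets of the two 32-bit halves of a lowered 64-bit access.
    void GetIndexOffsetsSketch(bool little_endian, int index,
                               int* index_low, int* index_high) {
      *index_low = little_endian ? index : index + 4;
      *index_high = little_endian ? index + 4 : index;
    }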
@@ -182,7 +185,7 @@ void Int64Lowering::LowerNode(Node* node) {
Node* index = node->InputAt(1);
Node* index_low;
Node* index_high;
- GetIndexNodes(index, index_low, index_high);
+ GetIndexNodes(index, &index_low, &index_high);
const Operator* load_op;
if (node->opcode() == IrOpcode::kLoad) {
@@ -232,7 +235,7 @@ void Int64Lowering::LowerNode(Node* node) {
Node* index = node->InputAt(1);
Node* index_low;
Node* index_high;
- GetIndexNodes(index, index_low, index_high);
+ GetIndexNodes(index, &index_low, &index_high);
Node* value = node->InputAt(2);
DCHECK(HasReplacementLow(value));
DCHECK(HasReplacementHigh(value));
@@ -291,12 +294,6 @@ void Int64Lowering::LowerNode(Node* node) {
// changes.
if (GetParameterCountAfterLowering(signature()) != param_count) {
int old_index = ParameterIndexOf(node->op());
- // Prevent special lowering of wasm's instance or JS
- // context/closure parameters.
- if (old_index <= 0 || old_index > param_count) {
- DefaultLowering(node);
- break;
- }
// Adjust old_index to be compliant with the signature.
--old_index;
int new_index = GetParameterIndexAfterLowering(signature(), old_index);
@@ -304,6 +301,12 @@ void Int64Lowering::LowerNode(Node* node) {
++new_index;
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
+ if (old_index < 0 || old_index >= param_count) {
+ // Special parameters (JS closure/context) don't have kWord64
+ // representation anyway.
+ break;
+ }
+
if (signature()->GetParam(old_index) ==
MachineRepresentation::kWord64) {
Node* high_node = graph()->NewNode(common()->Parameter(new_index + 1),
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index b083805771..9c77cf41a3 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -59,7 +59,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
bool HasReplacementHigh(Node* node);
Node* GetReplacementHigh(Node* node);
void PreparePhiReplacement(Node* phi);
- void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high);
+ void GetIndexNodes(Node* index, Node** index_low, Node** index_high);
void ReplaceNodeWithProjections(Node* node);
void LowerMemoryBaseAndIndex(Node* node);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index d58331c85e..8128f89949 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -179,6 +179,100 @@ Reduction JSCallReducer::ReduceMathMinMax(Node* node, const Operator* op,
return Replace(value);
}
+// ES section #sec-math.hypot Math.hypot ( value1, value2, ...values )
+Reduction JSCallReducer::ReduceMathHypot(Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->ZeroConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ NodeVector values(graph()->zone());
+
+ Node* max = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ NodeProperties::GetValueInput(node, 2), effect, control);
+ max = graph()->NewNode(simplified()->NumberAbs(), max);
+ values.push_back(max);
+ for (int i = 3; i < node->op()->ValueInputCount(); ++i) {
+ Node* input = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
+ p.feedback()),
+ NodeProperties::GetValueInput(node, i), effect, control);
+ input = graph()->NewNode(simplified()->NumberAbs(), input);
+ values.push_back(input);
+
+ // Make sure {max} is NaN in the end in case any argument was NaN.
+ max = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), input, max),
+ max, input);
+ }
+
+ Node* check0 = graph()->NewNode(simplified()->NumberEqual(), max,
+ jsgraph()->ZeroConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0 = jsgraph()->ZeroConstant();
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(simplified()->NumberEqual(), max,
+ jsgraph()->Constant(V8_INFINITY));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = jsgraph()->Constant(V8_INFINITY);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ // Kahan summation to avoid rounding errors.
+ // Normalize the numbers to the largest one to avoid overflow.
+ Node* sum = jsgraph()->ZeroConstant();
+ Node* compensation = jsgraph()->ZeroConstant();
+ for (Node* value : values) {
+ Node* n = graph()->NewNode(simplified()->NumberDivide(), value, max);
+ Node* summand = graph()->NewNode(
+ simplified()->NumberSubtract(),
+ graph()->NewNode(simplified()->NumberMultiply(), n, n),
+ compensation);
+ Node* preliminary =
+ graph()->NewNode(simplified()->NumberAdd(), sum, summand);
+ compensation = graph()->NewNode(
+ simplified()->NumberSubtract(),
+ graph()->NewNode(simplified()->NumberSubtract(), preliminary, sum),
+ summand);
+ sum = preliminary;
+ }
+ vfalse1 = graph()->NewNode(
+ simplified()->NumberMultiply(),
+ graph()->NewNode(simplified()->NumberSqrt(), sum), max);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
+ vfalse0, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
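Editorial note: ReduceMathHypot above builds a graph that takes the largest absolute argument, normalizes every argument by it to avoid overflow, sums the squares with Kahan (compensated) summation, and multiplies the square root back by the maximum. A plain scalar sketch of the same computation (assuming finite, non-NaN inputs; the NaN, infinity, and all-zero branches handled by the graph are omitted) is:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Scalar sketch of the lowered Math.hypot: normalize by the largest
    // |value| and use Kahan summation, mirroring the graph built above.
    double HypotSketch(const std::vector<double>& values) {
      double max = 0;
      for (double v : values) max = std::max(max, std::fabs(v));
      if (max == 0) return 0;
      double sum = 0, compensation = 0;
      for (double v : values) {
        double n = std::fabs(v) / max;           // normalize to avoid overflow
        double summand = n * n - compensation;   // Kahan: subtract carried error
        double preliminary = sum + summand;
        compensation = (preliminary - sum) - summand;
        sum = preliminary;
      }
      return std::sqrt(sum) * max;
    }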
Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSConstruct:
@@ -274,6 +368,8 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
// ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
size_t arity = p.arity();
@@ -381,9 +477,17 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
}
}
// Change {node} to the new {JSCall} operator.
+ // TODO(mslekova): Since this introduces a Call that will get optimized by
+ // the JSCallReducer, we basically might have to do all the serialization
+ // that we do for that here as well. The only difference is that here we
+ // disable speculation (cf. the empty VectorSlotPair above), causing the
+ // JSCallReducer to do much less work. We should revisit this later.
NodeProperties::ChangeOp(
node,
javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
+ // TODO(mslekova): Remove once ReduceJSCall is brokerized.
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -496,6 +600,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
Node* target = NodeProperties::GetValueInput(node, 0);
@@ -508,6 +614,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
HeapObjectMatcher m(target);
if (m.HasValue()) {
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
+ if (FLAG_concurrent_inlining && !function.serialized()) {
+ TRACE_BROKER_MISSING(broker(), "Serialize call on function " << function);
+ return NoChange();
+ }
context = jsgraph()->Constant(function.context());
} else {
context = effect = graph()->NewNode(
@@ -537,6 +647,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
NodeProperties::ChangeOp(
node,
javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
+ // TODO(mslekova): Remove once ReduceJSCall is brokerized.
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -588,7 +701,6 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
MapRef object_map(broker(), object_maps[i]);
object_map.SerializePrototype();
if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
- object_map.has_hidden_prototype() ||
!object_map.prototype().equals(candidate_prototype)) {
// We exclude special receivers, like JSProxy or API objects that
// might require access checks here; we also don't want to deal
@@ -1002,27 +1114,28 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
return true;
}
-bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
- MapHandles const& receiver_maps,
- ElementsKind* kind_return,
- bool builtin_is_push = false) {
+bool CanInlineArrayResizingBuiltin(
+ JSHeapBroker* broker, MapHandles const& receiver_maps,
+ std::vector<ElementsKind>& kinds, // NOLINT(runtime/references)
+ bool builtin_is_push = false) {
DCHECK_NE(0, receiver_maps.size());
- *kind_return = MapRef(broker, receiver_maps[0]).elements_kind();
for (auto receiver_map : receiver_maps) {
MapRef map(broker, receiver_map);
if (!map.supports_fast_array_resize()) return false;
- if (builtin_is_push) {
- if (!UnionElementsKindUptoPackedness(kind_return, map.elements_kind())) {
- return false;
- }
- } else {
- // TODO(turbofan): We should also handle fast holey double elements once
- // we got the hole NaN mess sorted out in TurboFan/V8.
- if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS ||
- !UnionElementsKindUptoSize(kind_return, map.elements_kind())) {
- return false;
+ // TODO(turbofan): We should also handle fast holey double elements once
+ // we got the hole NaN mess sorted out in TurboFan/V8.
+ if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS && !builtin_is_push) {
+ return false;
+ }
+ ElementsKind current_kind = map.elements_kind();
+ auto kind_ptr = kinds.data();
+ size_t i;
+ for (i = 0; i < kinds.size(); i++, kind_ptr++) {
+ if (UnionElementsKindUptoPackedness(kind_ptr, current_kind)) {
+ break;
}
}
+ if (i == kinds.size()) kinds.push_back(current_kind);
}
return true;
}
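Editorial note: the rewritten CanInlineArrayResizingBuiltin above collects a small set of elements kinds rather than folding everything into a single kind: for each receiver map it either merges the map's kind into an existing entry (up to packedness) or appends it as a new entry. A rough standalone sketch of that accumulate-or-append loop, with a hypothetical TryMergeKind predicate standing in for UnionElementsKindUptoPackedness, is:

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for UnionElementsKindUptoPackedness: here it
    // only merges identical kinds; the real predicate also folds packed
    // kinds into their holey counterparts.
    bool TryMergeKind(int* existing, int incoming) {
      return *existing == incoming;
    }

    // Try to merge the incoming kind into one of the kinds collected so
    // far, otherwise record it as a new entry.
    void CollectKindSketch(std::vector<int>* kinds, int incoming) {
      size_t i = 0;
      for (; i < kinds->size(); i++) {
        if (TryMergeKind(&(*kinds)[i], incoming)) break;
      }
      if (i == kinds->size()) kinds->push_back(incoming);
    }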
@@ -2735,6 +2848,8 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
Reduction JSCallReducer::ReduceCallApiFunction(
Node* node, const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int const argc = static_cast<int>(p.arity()) - 2;
@@ -2750,78 +2865,21 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
- // See if we can optimize this API call to {shared}.
- Handle<FunctionTemplateInfo> function_template_info(
- FunctionTemplateInfo::cast(shared.object()->function_data()), isolate());
- CallOptimization call_optimization(isolate(), function_template_info);
- if (!call_optimization.is_simple_api_call()) return NoChange();
-
- // Try to infer the {receiver} maps from the graph.
- MapInference inference(broker(), receiver, effect);
- if (inference.HaveMaps()) {
- MapHandles const& receiver_maps = inference.GetMaps();
-
- // Check that all {receiver_maps} are actually JSReceiver maps and
- // that the {function_template_info} accepts them without access
- // checks (even if "access check needed" is set for {receiver}).
- //
- // Note that we don't need to know the concrete {receiver} maps here,
- // meaning it's fine if the {receiver_maps} are unreliable, and we also
- // don't need to install any stability dependencies, since the only
- // relevant information regarding the {receiver} is the Map::constructor
- // field on the root map (which is different from the JavaScript exposed
- // "constructor" property) and that field cannot change.
- //
- // So if we know that {receiver} had a certain constructor at some point
- // in the past (i.e. it had a certain map), then this constructor is going
- // to be the same later, since this information cannot change with map
- // transitions.
- //
- // The same is true for the instance type, e.g. we still know that the
- // instance type is JSObject even if that information is unreliable, and
- // the "access check needed" bit, which also cannot change later.
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSReceiverMap() ||
- (receiver_map.is_access_check_needed() &&
- !function_template_info->accept_any_receiver())) {
- return inference.NoChange();
- }
- }
-
- // See if we can constant-fold the compatible receiver checks.
- CallOptimization::HolderLookup lookup;
- Handle<JSObject> api_holder =
- call_optimization.LookupHolderOfExpectedType(receiver_maps[0], &lookup);
- if (lookup == CallOptimization::kHolderNotFound)
- return inference.NoChange();
- for (size_t i = 1; i < receiver_maps.size(); ++i) {
- CallOptimization::HolderLookup lookupi;
- Handle<JSObject> holderi = call_optimization.LookupHolderOfExpectedType(
- receiver_maps[i], &lookupi);
- if (lookup != lookupi) return inference.NoChange();
- if (!api_holder.is_identical_to(holderi)) return inference.NoChange();
- }
+ if (!shared.function_template_info().has_value()) {
+ TRACE_BROKER_MISSING(
+ broker(), "FunctionTemplateInfo for function with SFI " << shared);
+ return NoChange();
+ }
- if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation &&
- !inference.RelyOnMapsViaStability(dependencies())) {
- // We were not able to make the receiver maps reliable without map checks
- // but doing map checks would lead to deopt loops, so give up.
- return inference.NoChange();
- }
+ // See if we can optimize this API call to {shared}.
+ FunctionTemplateInfoRef function_template_info(
+ shared.function_template_info().value());
- // TODO(neis): The maps were used in a way that does not actually require
- // map checks or stability dependencies.
- inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
- control, p.feedback());
+ if (!function_template_info.has_call_code()) return NoChange();
- // Determine the appropriate holder for the {lookup}.
- holder = lookup == CallOptimization::kHolderFound
- ? jsgraph()->HeapConstant(api_holder)
- : receiver;
- } else if (function_template_info->accept_any_receiver() &&
- function_template_info->signature().IsUndefined(isolate())) {
- // We haven't found any {receiver_maps}, but we might still be able to
+ if (function_template_info.accept_any_receiver() &&
+ function_template_info.is_signature_undefined()) {
+ // We might be able to
// optimize the API call depending on the {function_template_info}.
// If the API function accepts any kind of {receiver}, we only need to
// ensure that the {receiver} is actually a JSReceiver at this point,
@@ -2840,51 +2898,127 @@ Reduction JSCallReducer::ReduceCallApiFunction(
graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
receiver, global_proxy, effect, control);
} else {
- // We don't have enough information to eliminate the access check
- // and/or the compatible receiver check, so use the generic builtin
- // that does those checks dynamically. This is still significantly
- // faster than the generic call sequence.
- Builtins::Name builtin_name =
- !function_template_info->accept_any_receiver()
- ? (function_template_info->signature().IsUndefined(isolate())
- ? Builtins::kCallFunctionTemplate_CheckAccess
- : Builtins::
- kCallFunctionTemplate_CheckAccessAndCompatibleReceiver)
- : Builtins::kCallFunctionTemplate_CheckCompatibleReceiver;
-
- // The CallFunctionTemplate builtin requires the {receiver} to be
- // an actual JSReceiver, so make sure we do the proper conversion
- // first if necessary.
- receiver = holder = effect =
- graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
- receiver, global_proxy, effect, control);
+ // Try to infer the {receiver} maps from the graph.
+ MapInference inference(broker(), receiver, effect);
+ if (inference.HaveMaps()) {
+ MapHandles const& receiver_maps = inference.GetMaps();
+ MapRef first_receiver_map(broker(), receiver_maps[0]);
+
+ // See if we can constant-fold the compatible receiver checks.
+ HolderLookupResult api_holder =
+ function_template_info.LookupHolderOfExpectedType(first_receiver_map,
+ false);
+ if (api_holder.lookup == CallOptimization::kHolderNotFound)
+ return inference.NoChange();
- Callable callable = Builtins::CallableFor(isolate(), builtin_name);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState);
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->ReplaceInput(1, jsgraph()->HeapConstant(function_template_info));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
- node->ReplaceInput(3, receiver); // Update receiver input.
- node->ReplaceInput(6 + argc, effect); // Update effect input.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- return Changed(node);
+ // Check that all {receiver_maps} are actually JSReceiver maps and
+ // that the {function_template_info} accepts them without access
+ // checks (even if "access check needed" is set for {receiver}).
+ //
+ // Note that we don't need to know the concrete {receiver} maps here,
+ // meaning it's fine if the {receiver_maps} are unreliable, and we also
+ // don't need to install any stability dependencies, since the only
+ // relevant information regarding the {receiver} is the Map::constructor
+ // field on the root map (which is different from the JavaScript exposed
+ // "constructor" property) and that field cannot change.
+ //
+ // So if we know that {receiver} had a certain constructor at some point
+ // in the past (i.e. it had a certain map), then this constructor is going
+ // to be the same later, since this information cannot change with map
+ // transitions.
+ //
+ // The same is true for the instance type, e.g. we still know that the
+ // instance type is JSObject even if that information is unreliable, and
+ // the "access check needed" bit, which also cannot change later.
+ CHECK(first_receiver_map.IsJSReceiverMap());
+ CHECK(!first_receiver_map.is_access_check_needed() ||
+ function_template_info.accept_any_receiver());
+
+ for (size_t i = 1; i < receiver_maps.size(); ++i) {
+ MapRef receiver_map(broker(), receiver_maps[i]);
+ HolderLookupResult holder_i =
+ function_template_info.LookupHolderOfExpectedType(receiver_map,
+ false);
+
+ if (api_holder.lookup != holder_i.lookup) return inference.NoChange();
+ if (!(api_holder.holder.has_value() && holder_i.holder.has_value()))
+ return inference.NoChange();
+ if (!api_holder.holder->equals(*holder_i.holder))
+ return inference.NoChange();
+
+ CHECK(receiver_map.IsJSReceiverMap());
+ CHECK(!receiver_map.is_access_check_needed() ||
+ function_template_info.accept_any_receiver());
+ }
+
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation &&
+ !inference.RelyOnMapsViaStability(dependencies())) {
+ // We were not able to make the receiver maps reliable without map
+ // checks but doing map checks would lead to deopt loops, so give up.
+ return inference.NoChange();
+ }
+
+ // TODO(neis): The maps were used in a way that does not actually require
+ // map checks or stability dependencies.
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
+
+ // Determine the appropriate holder for the {lookup}.
+ holder = api_holder.lookup == CallOptimization::kHolderFound
+ ? jsgraph()->Constant(*api_holder.holder)
+ : receiver;
+ } else {
+ // We don't have enough information to eliminate the access check
+ // and/or the compatible receiver check, so use the generic builtin
+ // that does those checks dynamically. This is still significantly
+ // faster than the generic call sequence.
+ Builtins::Name builtin_name;
+ if (function_template_info.accept_any_receiver()) {
+ builtin_name = Builtins::kCallFunctionTemplate_CheckCompatibleReceiver;
+ } else if (function_template_info.is_signature_undefined()) {
+ builtin_name = Builtins::kCallFunctionTemplate_CheckAccess;
+ } else {
+ builtin_name =
+ Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver;
+ }
+
+ // The CallFunctionTemplate builtin requires the {receiver} to be
+ // an actual JSReceiver, so make sure we do the proper conversion
+ // first if necessary.
+ receiver = holder = effect =
+ graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
+ receiver, global_proxy, effect, control);
+
+ Callable callable = Builtins::CallableFor(isolate(), builtin_name);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->ReplaceInput(1, jsgraph()->Constant(function_template_info));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
+ node->ReplaceInput(3, receiver); // Update receiver input.
+ node->ReplaceInput(6 + argc, effect); // Update effect input.
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ return Changed(node);
+ }
}
// TODO(turbofan): Consider introducing a JSCallApiCallback operator for
// this and lower it during JSGenericLowering, and unify this with the
// JSNativeContextSpecialization::InlineApiCall method a bit.
- Handle<CallHandlerInfo> call_handler_info(
- CallHandlerInfo::cast(function_template_info->call_code()), isolate());
- Handle<Object> data(call_handler_info->data(), isolate());
+ if (!function_template_info.call_code().has_value()) {
+ TRACE_BROKER_MISSING(broker(), "call code for function template info "
+ << function_template_info);
+ return NoChange();
+ }
+ CallHandlerInfoRef call_handler_info = *function_template_info.call_code();
Callable call_api_callback = CodeFactory::CallApiCallback(isolate());
CallInterfaceDescriptor cid = call_api_callback.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), cid, argc + 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState);
- ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
+ ApiFunction api_function(call_handler_info.callback());
ExternalReference function_reference = ExternalReference::Create(
&api_function, ExternalReference::DIRECT_API_CALL);
@@ -2895,7 +3029,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
jsgraph()->HeapConstant(call_api_callback.code()));
node->ReplaceInput(1, jsgraph()->ExternalConstant(function_reference));
node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(data));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(call_handler_info.data()));
node->InsertInput(graph()->zone(), 4, holder);
node->ReplaceInput(5, receiver); // Update receiver input.
node->ReplaceInput(7 + argc, continuation_frame_state);
@@ -3495,6 +3630,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceMathUnary(node, simplified()->NumberFloor());
case Builtins::kMathFround:
return ReduceMathUnary(node, simplified()->NumberFround());
+ case Builtins::kMathHypot:
+ return ReduceMathHypot(node);
case Builtins::kMathLog:
return ReduceMathUnary(node, simplified()->NumberLog());
case Builtins::kMathLog1p:
@@ -3563,8 +3700,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceStringPrototypeStringAt(simplified()->StringCharCodeAt(),
node);
case Builtins::kStringPrototypeCodePointAt:
- return ReduceStringPrototypeStringAt(
- simplified()->StringCodePointAt(UnicodeEncoding::UTF32), node);
+ return ReduceStringPrototypeStringAt(simplified()->StringCodePointAt(),
+ node);
case Builtins::kStringPrototypeSubstring:
return ReduceStringPrototypeSubstring(node);
case Builtins::kStringPrototypeSlice:
@@ -3642,18 +3779,23 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceDateNow(node);
case Builtins::kNumberConstructor:
return ReduceNumberConstructor(node);
+ case Builtins::kBigIntAsUintN:
+ return ReduceBigIntAsUintN(node);
default:
break;
}
- if (!TracingFlags::is_runtime_stats_enabled() &&
- shared.object()->IsApiFunction()) {
+ if (shared.object()->IsApiFunction()) {
return ReduceCallApiFunction(node, shared);
}
return NoChange();
}
Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) {
+ // TODO(mslekova): Remove once ReduceJSCallWithArrayLike is brokerized.
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
VectorSlotPair feedback;
@@ -4250,6 +4392,52 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
return Changed(node);
}
+Node* JSCallReducer::LoadReceiverElementsKind(Node* receiver, Node** effect,
+ Node** control) {
+ Node* receiver_map = *effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, *effect, *control);
+ Node* receiver_bit_field2 = *effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField2()), receiver_map,
+ *effect, *control);
+ Node* receiver_elements_kind = graph()->NewNode(
+ simplified()->NumberShiftRightLogical(),
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), receiver_bit_field2,
+ jsgraph()->Constant(Map::ElementsKindBits::kMask)),
+ jsgraph()->Constant(Map::ElementsKindBits::kShift));
+ return receiver_elements_kind;
+}
+
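LoadReceiverElementsKind reads the receiver's map, then its bit_field2, and extracts the elements kind with a mask-and-shift. A minimal plain-C++ sketch of that decode follows (illustrative only, not part of the patch; the shift/mask constants are placeholders standing in for Map::ElementsKindBits):

    #include <cstdint>

    // Placeholder constants standing in for Map::ElementsKindBits::kShift/kMask.
    constexpr uint32_t kElementsKindShift = 3;
    constexpr uint32_t kElementsKindMask = 0x1Fu << kElementsKindShift;

    // Same computation the reducer emits as NumberBitwiseAnd followed by
    // NumberShiftRightLogical on the loaded bit_field2.
    uint32_t DecodeElementsKind(uint32_t bit_field2) {
      return (bit_field2 & kElementsKindMask) >> kElementsKindShift;
    }
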
+void JSCallReducer::CheckIfElementsKind(Node* receiver_elements_kind,
+ ElementsKind kind, Node* control,
+ Node** if_true, Node** if_false) {
+ Node* is_packed_kind =
+ graph()->NewNode(simplified()->NumberEqual(), receiver_elements_kind,
+ jsgraph()->Constant(GetPackedElementsKind(kind)));
+ Node* packed_branch =
+ graph()->NewNode(common()->Branch(), is_packed_kind, control);
+ Node* if_packed = graph()->NewNode(common()->IfTrue(), packed_branch);
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* if_not_packed = graph()->NewNode(common()->IfFalse(), packed_branch);
+ Node* is_holey_kind =
+ graph()->NewNode(simplified()->NumberEqual(), receiver_elements_kind,
+ jsgraph()->Constant(GetHoleyElementsKind(kind)));
+ Node* holey_branch =
+ graph()->NewNode(common()->Branch(), is_holey_kind, if_not_packed);
+ Node* if_holey = graph()->NewNode(common()->IfTrue(), holey_branch);
+
+ Node* if_not_packed_not_holey =
+ graph()->NewNode(common()->IfFalse(), holey_branch);
+
+ *if_true = graph()->NewNode(common()->Merge(2), if_packed, if_holey);
+ *if_false = if_not_packed_not_holey;
+ } else {
+ *if_true = if_packed;
+ *if_false = graph()->NewNode(common()->IfFalse(), packed_branch);
+ }
+}
+
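CheckIfElementsKind branches on whether the dynamically loaded elements kind matches the kind a specialized body was built for; a holey kind also accepts its packed counterpart, which is why if_packed and if_holey are merged above. A sketch of that predicate as straight C++, using a stand-in enum rather than V8's real ElementsKind:

    // Stand-in enum; the real ElementsKind lives in src/objects/elements-kind.h.
    enum class Kind { kPackedSmi, kHoleySmi, kPackedDouble, kHoleyDouble };

    static bool IsHoley(Kind k) {
      return k == Kind::kHoleySmi || k == Kind::kHoleyDouble;
    }

    static Kind PackedVariant(Kind k) {
      if (k == Kind::kHoleySmi) return Kind::kPackedSmi;
      if (k == Kind::kHoleyDouble) return Kind::kPackedDouble;
      return k;
    }

    // True when {actual} may take the branch specialized for {kind}: the packed
    // variant always matches, and a holey {kind} additionally matches itself.
    static bool MatchesKind(Kind actual, Kind kind) {
      if (actual == PackedVariant(kind)) return true;
      return IsHoley(kind) && actual == kind;
    }
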
// ES6 section 22.1.3.18 Array.prototype.push ( )
Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -4267,81 +4455,121 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- ElementsKind kind;
- if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind, true)) {
+ std::vector<ElementsKind> kinds;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds, true)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
- // Collect the value inputs to push.
- std::vector<Node*> values(num_values);
- for (int i = 0; i < num_values; ++i) {
- values[i] = NodeProperties::GetValueInput(node, 2 + i);
- }
-
- for (auto& value : values) {
- if (IsSmiElementsKind(kind)) {
- value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- value, effect, control);
- } else if (IsDoubleElementsKind(kind)) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(p.feedback()),
- value, effect, control);
- // Make sure we do not store signaling NaNs into double arrays.
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ std::vector<Node*> controls_to_merge;
+ std::vector<Node*> effects_to_merge;
+ std::vector<Node*> values_to_merge;
+ Node* return_value = jsgraph()->UndefinedConstant();
+
+ Node* receiver_elements_kind =
+ LoadReceiverElementsKind(receiver, &effect, &control);
+ Node* next_control = control;
+ Node* next_effect = effect;
+ for (size_t i = 0; i < kinds.size(); i++) {
+ ElementsKind kind = kinds[i];
+ control = next_control;
+ effect = next_effect;
+    // We do not need a branch for the last elements kind.
+ if (i != kinds.size() - 1) {
+ CheckIfElementsKind(receiver_elements_kind, kind, control, &control,
+ &next_control);
}
- }
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
- effect, control);
- Node* value = length;
+ // Collect the value inputs to push.
+ std::vector<Node*> values(num_values);
+ for (int i = 0; i < num_values; ++i) {
+ values[i] = NodeProperties::GetValueInput(node, 2 + i);
+ }
- // Check if we have any {values} to push.
- if (num_values > 0) {
- // Compute the resulting "length" of the {receiver}.
- Node* new_length = value = graph()->NewNode(
- simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
+ for (auto& value : values) {
+ if (IsSmiElementsKind(kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ value, effect, control);
+ } else if (IsDoubleElementsKind(kind)) {
+ value = effect = graph()->NewNode(
+ simplified()->CheckNumber(p.feedback()), value, effect, control);
+ // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
+ }
- // Load the elements backing store of the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- effect, control);
- Node* elements_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
- effect, control);
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, effect, control);
+ return_value = length;
- GrowFastElementsMode mode =
- IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
- : GrowFastElementsMode::kSmiOrObjectElements;
- elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver,
- elements,
- graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(num_values - 1)),
- elements_length, effect, control);
-
- // Update the JSArray::length field. Since this is observable,
- // there must be no other check after this.
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
- receiver, new_length, effect, control);
+ // Check if we have any {values} to push.
+ if (num_values > 0) {
+ // Compute the resulting "length" of the {receiver}.
+ Node* new_length = return_value = graph()->NewNode(
+ simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
- // Append the {values} to the {elements}.
- for (int i = 0; i < num_values; ++i) {
- Node* value = values[i];
- Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(i));
+ // Load the elements backing store of the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+ Node* elements_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(kind)
+ ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver,
+ elements,
+ graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(num_values - 1)),
+ elements_length, effect, control);
+
+ // Update the JSArray::length field. Since this is observable,
+ // there must be no other check after this.
effect = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, index, value, effect, control);
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, new_length, effect, control);
+
+ // Append the {values} to the {elements}.
+ for (int i = 0; i < num_values; ++i) {
+ Node* value = values[i];
+ Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(i));
+ effect =
+ graph()->NewNode(simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(kind)),
+ elements, index, value, effect, control);
+ }
}
+
+ controls_to_merge.push_back(control);
+ effects_to_merge.push_back(effect);
+ values_to_merge.push_back(return_value);
}
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ if (controls_to_merge.size() > 1) {
+ int const count = static_cast<int>(controls_to_merge.size());
+
+ control = graph()->NewNode(common()->Merge(count), count,
+ &controls_to_merge.front());
+ effects_to_merge.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1,
+ &effects_to_merge.front());
+ values_to_merge.push_back(control);
+ return_value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values_to_merge.front());
+ }
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
}
// ES6 section 22.1.3.17 Array.prototype.pop ( )
@@ -4360,79 +4588,117 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- ElementsKind kind;
- if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) {
+ std::vector<ElementsKind> kinds;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
- effect, control);
+ std::vector<Node*> controls_to_merge;
+ std::vector<Node*> effects_to_merge;
+ std::vector<Node*> values_to_merge;
+ Node* value = jsgraph()->UndefinedConstant();
+
+ Node* receiver_elements_kind =
+ LoadReceiverElementsKind(receiver, &effect, &control);
+ Node* next_control = control;
+ Node* next_effect = effect;
+ for (size_t i = 0; i < kinds.size(); i++) {
+ ElementsKind kind = kinds[i];
+ control = next_control;
+ effect = next_effect;
+    // We do not need a branch for the last elements kind.
+ if (i != kinds.size() - 1) {
+ CheckIfElementsKind(receiver_elements_kind, kind, control, &control,
+ &next_control);
+ }
- // Check if the {receiver} has any elements.
- Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, effect, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->UndefinedConstant();
+ // Check if the {receiver} has any elements.
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- // TODO(tebbi): We should trim the backing store if the capacity is too
- // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
-
- // Load the elements backing store from the {receiver}.
- Node* elements = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- efalse, if_false);
-
- // Ensure that we aren't popping from a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(kind)) {
- elements = efalse =
- graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
- elements, efalse, if_false);
- }
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->UndefinedConstant();
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // TODO(tebbi): We should trim the backing store if the capacity is too
+ // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+
+ // Load the elements backing store from the {receiver}.
+ Node* elements = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, efalse, if_false);
+
+ // Ensure that we aren't popping from a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = efalse =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, efalse, if_false);
+ }
+
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
- // Store the new {length} to the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
- receiver, length, efalse, if_false);
+ // Store the new {length} to the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, efalse, if_false);
+
+ // Load the last entry from the {elements}.
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, length, efalse, if_false);
+
+ // Store a hole to the element we just removed from the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
- // Load the last entry from the {elements}.
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, length, efalse, if_false);
+    // Convert the hole to undefined. Do this last, so that we can optimize
+    // the conversion operator via some smart strength reduction in many cases.
+ if (IsHoleyElementsKind(kind)) {
+ value =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ }
- // Store a hole to the element we just removed from the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreElement(
- AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
- elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+ controls_to_merge.push_back(control);
+ effects_to_merge.push_back(effect);
+ values_to_merge.push_back(value);
}
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ if (controls_to_merge.size() > 1) {
+ int const count = static_cast<int>(controls_to_merge.size());
- // Convert the hole to undefined. Do this last, so that we can optimize
- // conversion operator via some smart strength reduction in many cases.
- if (IsHoleyElementsKind(kind)) {
+ control = graph()->NewNode(common()->Merge(count), count,
+ &controls_to_merge.front());
+ effects_to_merge.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1,
+ &effects_to_merge.front());
+ values_to_merge.push_back(control);
value =
- graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values_to_merge.front());
}
ReplaceWithValue(node, value, effect, control);
@@ -4458,151 +4724,172 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- ElementsKind kind;
- if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) {
+ std::vector<ElementsKind> kinds;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
- // Load length of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
- effect, control);
+ std::vector<Node*> controls_to_merge;
+ std::vector<Node*> effects_to_merge;
+ std::vector<Node*> values_to_merge;
+ Node* value = jsgraph()->UndefinedConstant();
+
+ Node* receiver_elements_kind =
+ LoadReceiverElementsKind(receiver, &effect, &control);
+ Node* next_control = control;
+ Node* next_effect = effect;
+ for (size_t i = 0; i < kinds.size(); i++) {
+ ElementsKind kind = kinds[i];
+ control = next_control;
+ effect = next_effect;
+    // We do not need a branch for the last elements kind.
+ if (i != kinds.size() - 1) {
+ CheckIfElementsKind(receiver_elements_kind, kind, control, &control,
+ &next_control);
+ }
- // Return undefined if {receiver} has no elements.
- Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+ // Load length of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, effect, control);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->UndefinedConstant();
+ // Return undefined if {receiver} has no elements.
+ Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- // Check if we should take the fast-path.
- Node* check1 =
- graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
- jsgraph()->Constant(JSArray::kMaxCopyElements));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check1, if_false0);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->UndefinedConstant();
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1;
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
{
- Node* elements = etrue1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, etrue1, if_true1);
-
- // Load the first element here, which we return below.
- vtrue1 = etrue1 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
+ // Check if we should take the fast-path.
+ Node* check1 =
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
+ jsgraph()->Constant(JSArray::kMaxCopyElements));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1;
+ {
+ Node* elements = etrue1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, etrue1, if_true1);
- // Ensure that we aren't shifting a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(kind)) {
- elements = etrue1 =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, etrue1, if_true1);
- }
+ // Load the first element here, which we return below.
+ vtrue1 = etrue1 = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement(kind)),
+ elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
+
+ // Ensure that we aren't shifting a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = etrue1 =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, etrue1, if_true1);
+ }
- // Shift the remaining {elements} by one towards the start.
- Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
- Node* eloop =
- graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* index = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->OneConstant(),
- jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
+ // Shift the remaining {elements} by one towards the start.
+ Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
+ Node* eloop =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* index = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->OneConstant(),
+ jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
- {
- Node* check2 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
+ {
+ Node* check2 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
- if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
- etrue1 = eloop;
+ if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
+ etrue1 = eloop;
- Node* control = graph()->NewNode(common()->IfTrue(), branch2);
- Node* effect = etrue1;
+ Node* control = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* effect = etrue1;
- ElementAccess const access = AccessBuilder::ForFixedArrayElement(kind);
- Node* value = effect =
- graph()->NewNode(simplified()->LoadElement(access), elements, index,
- effect, control);
- effect =
- graph()->NewNode(simplified()->StoreElement(access), elements,
- graph()->NewNode(simplified()->NumberSubtract(),
- index, jsgraph()->OneConstant()),
- value, effect, control);
-
- loop->ReplaceInput(1, control);
- eloop->ReplaceInput(1, effect);
- index->ReplaceInput(1,
- graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant()));
- }
+ ElementAccess const access =
+ AccessBuilder::ForFixedArrayElement(kind);
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadElement(access), elements,
+ index, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreElement(access), elements,
+ graph()->NewNode(simplified()->NumberSubtract(), index,
+ jsgraph()->OneConstant()),
+ value, effect, control);
+
+ loop->ReplaceInput(1, control);
+ eloop->ReplaceInput(1, effect);
+ index->ReplaceInput(1,
+ graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant()));
+ }
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
- // Store the new {length} to the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
- receiver, length, etrue1, if_true1);
+ // Store the new {length} to the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, etrue1, if_true1);
- // Store a hole to the element we just removed from the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreElement(
- AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
- elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
- }
+ // Store a hole to the element we just removed from the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
+ GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
+ }
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- // Call the generic C++ implementation.
- const int builtin_index = Builtins::kArrayShift;
- auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
- graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
- Builtins::name(builtin_index), node->op()->properties(),
- CallDescriptor::kNeedsFrameState);
- Node* stub_code =
- jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack, true);
- Address builtin_entry = Builtins::CppEntryOf(builtin_index);
- Node* entry =
- jsgraph()->ExternalConstant(ExternalReference::Create(builtin_entry));
- Node* argc =
- jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
- if_false1 = efalse1 = vfalse1 =
- graph()->NewNode(common()->Call(call_descriptor), stub_code, receiver,
- jsgraph()->PaddingConstant(), argc, target,
- jsgraph()->UndefinedConstant(), entry, argc, context,
- frame_state, efalse1, if_false1);
- }
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ // Call the generic C++ implementation.
+ const int builtin_index = Builtins::kArrayShift;
+ auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
+ graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
+ Builtins::name(builtin_index), node->op()->properties(),
+ CallDescriptor::kNeedsFrameState);
+ Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs,
+ kArgvOnStack, true);
+ Address builtin_entry = Builtins::CppEntryOf(builtin_index);
+ Node* entry = jsgraph()->ExternalConstant(
+ ExternalReference::Create(builtin_entry));
+ Node* argc =
+ jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
+ if_false1 = efalse1 = vfalse1 =
+ graph()->NewNode(common()->Call(call_descriptor), stub_code,
+ receiver, jsgraph()->PaddingConstant(), argc,
+ target, jsgraph()->UndefinedConstant(), entry,
+ argc, context, frame_state, efalse1, if_false1);
+ }
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_false0);
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
// Convert the hole to undefined. Do this last, so that we can optimize
// conversion operator via some smart strength reduction in many cases.
@@ -4611,8 +4898,27 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
}
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ controls_to_merge.push_back(control);
+ effects_to_merge.push_back(effect);
+ values_to_merge.push_back(value);
+ }
+
+ if (controls_to_merge.size() > 1) {
+ int const count = static_cast<int>(controls_to_merge.size());
+
+ control = graph()->NewNode(common()->Merge(count), count,
+ &controls_to_merge.front());
+ effects_to_merge.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1,
+ &effects_to_merge.front());
+ values_to_merge.push_back(control);
+ value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values_to_merge.front());
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
// ES6 section 22.1.3.23 Array.prototype.slice ( )
@@ -5230,8 +5536,8 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
graph()->NewNode(simplified()->CheckBounds(p.feedback()), input,
jsgraph()->Constant(0x10FFFF + 1), effect, control);
- Node* value = graph()->NewNode(
- simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF32), input);
+ Node* value =
+ graph()->NewNode(simplified()->StringFromSingleCodePoint(), input);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
@@ -5287,12 +5593,8 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
Node* vtrue0;
{
done_true = jsgraph()->FalseConstant();
- Node* codepoint = etrue0 = graph()->NewNode(
- simplified()->StringCodePointAt(UnicodeEncoding::UTF16), string, index,
- etrue0, if_true0);
- vtrue0 = graph()->NewNode(
- simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF16),
- codepoint);
+ vtrue0 = etrue0 = graph()->NewNode(simplified()->StringFromCodePointAt(),
+ string, index, etrue0, if_true0);
// Update iterator.[[NextIndex]]
Node* char_length = graph()->NewNode(simplified()->StringLength(), vtrue0);
@@ -5396,6 +5698,8 @@ Node* JSCallReducer::CreateArtificialFrameState(
}
Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
int arity = static_cast<int>(p.arity() - 2);
@@ -5404,7 +5708,6 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Node* executor = NodeProperties::GetValueInput(node, 1);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
-
Node* context = NodeProperties::GetContextInput(node);
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -5459,7 +5762,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
// Allocate a promise context for the closures below.
Node* promise_context = effect = graph()->NewNode(
javascript()->CreateFunctionContext(
- handle(native_context().object()->scope_info(), isolate()),
+ native_context().scope_info().object(),
PromiseBuiltins::kPromiseContextLength - Context::MIN_CONTEXT_SLOTS,
FUNCTION_SCOPE),
context, effect, control);
@@ -5477,21 +5780,13 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
promise_context, jsgraph()->TrueConstant(), effect, control);
// Allocate the closure for the resolve case.
- SharedFunctionInfoRef resolve_shared =
- native_context().promise_capability_default_resolve_shared_fun();
- Node* resolve = effect = graph()->NewNode(
- javascript()->CreateClosure(
- resolve_shared.object(), factory()->many_closures_cell(),
- handle(resolve_shared.object()->GetCode(), isolate())),
+ Node* resolve = effect = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_capability_default_resolve_shared_fun(),
promise_context, effect, control);
// Allocate the closure for the reject case.
- SharedFunctionInfoRef reject_shared =
- native_context().promise_capability_default_reject_shared_fun();
- Node* reject = effect = graph()->NewNode(
- javascript()->CreateClosure(
- reject_shared.object(), factory()->many_closures_cell(),
- handle(reject_shared.object()->GetCode(), isolate())),
+ Node* reject = effect = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_capability_default_reject_shared_fun(),
promise_context, effect, control);
const std::vector<Node*> checkpoint_parameters_continuation(
@@ -5624,6 +5919,30 @@ Reduction JSCallReducer::ReducePromiseInternalResolve(Node* node) {
return Replace(value);
}
+bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
+ if (!inference->HaveMaps()) return false;
+ MapHandles const& receiver_maps = inference->GetMaps();
+
+ // Check whether all {receiver_maps} are JSPromise maps and
+ // have the initial Promise.prototype as their [[Prototype]].
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.IsJSPromiseMap()) return false;
+ if (!FLAG_concurrent_inlining) {
+ receiver_map.SerializePrototype();
+ } else if (!receiver_map.serialized_prototype()) {
+ TRACE_BROKER_MISSING(broker(), "prototype for map " << receiver_map);
+ return false;
+ }
+ if (!receiver_map.prototype().equals(
+ native_context().promise_prototype())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
// ES section #sec-promise.prototype.catch
Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -5637,20 +5956,7 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
MapInference inference(broker(), receiver, effect);
- if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
-
- // Check whether all {receiver_maps} are JSPromise maps and
- // have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
- receiver_map.SerializePrototype();
- if (!receiver_map.prototype().equals(
- native_context().promise_prototype())) {
- return inference.NoChange();
- }
- }
+ if (!DoPromiseChecks(&inference)) return inference.NoChange();
if (!dependencies()->DependOnPromiseThenProtector())
return inference.NoChange();
@@ -5675,8 +5981,21 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
+Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
+ SharedFunctionInfoRef shared, Node* context, Node* effect, Node* control) {
+ DCHECK(shared.HasBuiltinId());
+ Callable const callable = Builtins::CallableFor(
+ isolate(), static_cast<Builtins::Name>(shared.builtin_id()));
+ return graph()->NewNode(
+ javascript()->CreateClosure(
+ shared.object(), factory()->many_closures_cell(), callable.code()),
+ context, effect, control);
+}
+
// ES section #sec-promise.prototype.finally
Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int arity = static_cast<int>(p.arity() - 2);
@@ -5690,21 +6009,9 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
}
MapInference inference(broker(), receiver, effect);
- if (!inference.HaveMaps()) return NoChange();
+ if (!DoPromiseChecks(&inference)) return inference.NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- // Check whether all {receiver_maps} are JSPromise maps and
- // have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
- receiver_map.SerializePrototype();
- if (!receiver_map.prototype().equals(
- native_context().promise_prototype())) {
- return inference.NoChange();
- }
- }
-
if (!dependencies()->DependOnPromiseHookProtector())
return inference.NoChange();
if (!dependencies()->DependOnPromiseThenProtector())
@@ -5730,13 +6037,13 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
jsgraph()->Constant(native_context().promise_function());
// Allocate shared context for the closures below.
- context = etrue = graph()->NewNode(
- javascript()->CreateFunctionContext(
- handle(native_context().object()->scope_info(), isolate()),
- PromiseBuiltins::kPromiseFinallyContextLength -
- Context::MIN_CONTEXT_SLOTS,
- FUNCTION_SCOPE),
- context, etrue, if_true);
+ context = etrue =
+ graph()->NewNode(javascript()->CreateFunctionContext(
+ native_context().scope_info().object(),
+ PromiseBuiltins::kPromiseFinallyContextLength -
+ Context::MIN_CONTEXT_SLOTS,
+ FUNCTION_SCOPE),
+ context, etrue, if_true);
etrue = graph()->NewNode(
simplified()->StoreField(
AccessBuilder::ForContextSlot(PromiseBuiltins::kOnFinallySlot)),
@@ -5747,22 +6054,14 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
context, constructor, etrue, if_true);
// Allocate the closure for the reject case.
- SharedFunctionInfoRef catch_finally =
- native_context().promise_catch_finally_shared_fun();
- catch_true = etrue = graph()->NewNode(
- javascript()->CreateClosure(
- catch_finally.object(), factory()->many_closures_cell(),
- handle(catch_finally.object()->GetCode(), isolate())),
- context, etrue, if_true);
+ catch_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_catch_finally_shared_fun(), context, etrue,
+ if_true);
// Allocate the closure for the fulfill case.
- SharedFunctionInfoRef then_finally =
- native_context().promise_then_finally_shared_fun();
- then_true = etrue = graph()->NewNode(
- javascript()->CreateClosure(
- then_finally.object(), factory()->many_closures_cell(),
- handle(then_finally.object()->GetCode(), isolate())),
- context, etrue, if_true);
+ then_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_then_finally_shared_fun(), context, etrue,
+ if_true);
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -5810,6 +6109,8 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
}
Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -5829,20 +6130,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
MapInference inference(broker(), receiver, effect);
- if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
-
- // Check whether all {receiver_maps} are JSPromise maps and
- // have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
- receiver_map.SerializePrototype();
- if (!receiver_map.prototype().equals(
- native_context().promise_prototype())) {
- return inference.NoChange();
- }
- }
+ if (!DoPromiseChecks(&inference)) return inference.NoChange();
if (!dependencies()->DependOnPromiseHookProtector())
return inference.NoChange();
@@ -5889,6 +6177,8 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
// ES section #sec-promise.resolve
Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* value = node->op()->ValueInputCount() > 2
@@ -6828,8 +7118,11 @@ Reduction JSCallReducer::ReduceNumberParseInt(Node* node) {
}
Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
+ DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
if (FLAG_force_slow_path) return NoChange();
if (node->op()->ValueInputCount() < 3) return NoChange();
+
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
@@ -6846,13 +7139,24 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
}
MapHandles const& regexp_maps = inference.GetMaps();
- // Compute property access info for "exec" on {resolution}.
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
- access_info_factory.ComputePropertyAccessInfos(
- MapHandles(regexp_maps.begin(), regexp_maps.end()),
- factory()->exec_string(), AccessMode::kLoad, &access_infos);
+ if (!FLAG_concurrent_inlining) {
+    // Compute property access info for "exec" on the {regexp} maps.
+ access_info_factory.ComputePropertyAccessInfos(
+ MapHandles(regexp_maps.begin(), regexp_maps.end()),
+ factory()->exec_string(), AccessMode::kLoad, &access_infos);
+ } else {
+ // Obtain precomputed access infos from the broker.
+ for (auto map : regexp_maps) {
+ MapRef map_ref(broker(), map);
+ PropertyAccessInfo access_info =
+ broker()->GetAccessInfoForLoadingExec(map_ref);
+ access_infos.push_back(access_info);
+ }
+ }
+
PropertyAccessInfo ai_exec =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
AccessMode::kLoad);
@@ -6864,34 +7168,24 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Do not reduce if the exec method is not on the prototype chain.
if (!ai_exec.holder().ToHandle(&holder)) return inference.NoChange();
+ JSObjectRef holder_ref(broker(), holder);
+
// Bail out if the exec method is not the original one.
- Handle<Object> constant = JSObject::FastPropertyAt(
- holder, ai_exec.field_representation(), ai_exec.field_index());
- if (!constant.is_identical_to(isolate()->regexp_exec_function())) {
+ base::Optional<ObjectRef> constant = holder_ref.GetOwnProperty(
+ ai_exec.field_representation(), ai_exec.field_index());
+ if (!constant.has_value() ||
+ !constant->equals(native_context().regexp_exec_function())) {
return inference.NoChange();
}
- // Protect the exec method change in the holder.
- Handle<Object> exec_on_proto;
- MapRef holder_map(broker(), handle(holder->map(), isolate()));
- Handle<DescriptorArray> descriptors(
- holder_map.object()->instance_descriptors(), isolate());
- int descriptor_index =
- descriptors->Search(*(factory()->exec_string()), *holder_map.object());
- CHECK_NE(descriptor_index, DescriptorArray::kNotFound);
- holder_map.SerializeOwnDescriptors();
- dependencies()->DependOnFieldType(holder_map, descriptor_index);
- } else {
- return inference.NoChange();
- }
-
- // Add proper dependencies on the {regexp}s [[Prototype]]s.
- Handle<JSObject> holder;
- if (ai_exec.holder().ToHandle(&holder)) {
+ // Add proper dependencies on the {regexp}s [[Prototype]]s.
dependencies()->DependOnStablePrototypeChains(
ai_exec.receiver_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
+ } else {
+ return inference.NoChange();
}
+
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -6955,12 +7249,47 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
return Changed(node);
}
+Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) {
+ if (!jsgraph()->machine()->Is64()) {
+ return NoChange();
+ }
+
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* bits = NodeProperties::GetValueInput(node, 2);
+ Node* value = NodeProperties::GetValueInput(node, 3);
+
+ NumberMatcher matcher(bits);
+ if (matcher.IsInteger() && matcher.IsInRange(0, 64)) {
+ const int bits_value = static_cast<int>(matcher.Value());
+ value = effect = graph()->NewNode(simplified()->CheckBigInt(p.feedback()),
+ value, effect, control);
+ value = graph()->NewNode(simplified()->BigIntAsUintN(bits_value), value);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+
+ return NoChange();
+}
+
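ReduceBigIntAsUintN only fires when the bits argument is a compile-time integer constant in the range [0, 64]; after CheckBigInt, the lowered BigIntAsUintN(bits) computes value mod 2^bits. A sketch of that truncation on a single 64-bit digit (an assumption made for illustration; the real lowering goes through the simplified operator rather than raw integers):

    #include <cstdint>

    // value mod 2^bits for 0 <= bits <= 64, i.e. the BigInt.asUintN semantics
    // restricted to a value that fits in one 64-bit digit.
    uint64_t AsUintN(int bits, uint64_t value) {
      if (bits <= 0) return 0;
      if (bits >= 64) return value;
      return value & ((uint64_t{1} << bits) - 1);
    }
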
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
Factory* JSCallReducer::factory() const { return isolate()->factory(); }
+NativeContextRef JSCallReducer::native_context() const {
+ return broker()->native_context();
+}
+
CommonOperatorBuilder* JSCallReducer::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 02821ebb0d..bf3676c5b2 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -29,6 +29,7 @@ struct FieldAccess;
class JSGraph;
class JSHeapBroker;
class JSOperatorBuilder;
+class MapInference;
class NodeProperties;
class SimplifiedOperatorBuilder;
@@ -155,6 +156,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathClz32(Node* node);
Reduction ReduceMathMinMax(Node* node, const Operator* op, Node* empty_value);
+ Reduction ReduceMathHypot(Node* node);
Reduction ReduceNumberIsFinite(Node* node);
Reduction ReduceNumberIsInteger(Node* node);
@@ -190,6 +192,15 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceNumberConstructor(Node* node);
+ Reduction ReduceBigIntAsUintN(Node* node);
+
+ // Helper to verify promise receiver maps are as expected.
+ // On bailout from a reduction, be sure to return inference.NoChange().
+ bool DoPromiseChecks(MapInference* inference);
+
+ Node* CreateClosureFromBuiltinSharedFunctionInfo(SharedFunctionInfoRef shared,
+ Node* context, Node* effect,
+ Node* control);
// Returns the updated {to} node, and updates control and effect along the
// way.
@@ -231,12 +242,16 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
const SharedFunctionInfoRef& shared,
Node* context = nullptr);
+ void CheckIfElementsKind(Node* receiver_elements_kind, ElementsKind kind,
+ Node* control, Node** if_true, Node** if_false);
+ Node* LoadReceiverElementsKind(Node* receiver, Node** effect, Node** control);
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const;
Factory* factory() const;
- NativeContextRef native_context() const { return broker()->native_context(); }
+ NativeContextRef native_context() const;
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index dea6d7fc2b..035e8b7ceb 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -6,6 +6,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -144,9 +145,10 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// Now walk up the concrete context chain for the remaining depth.
ContextRef concrete = maybe_concrete.value();
- concrete.SerializeContextChain(); // TODO(neis): Remove later.
- for (; depth > 0; --depth) {
- concrete = concrete.previous();
+ concrete = concrete.previous(&depth);
+ if (depth > 0) {
+ TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete);
+ return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
if (!access.immutable()) {
@@ -157,8 +159,6 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// This will hold the final value, if we can figure it out.
base::Optional<ObjectRef> maybe_value;
-
- concrete.SerializeSlot(static_cast<int>(access.index()));
maybe_value = concrete.get(static_cast<int>(access.index()));
if (maybe_value.has_value() && !maybe_value->IsSmi()) {
// Even though the context slot is immutable, the context might have escaped
@@ -174,6 +174,9 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
}
if (!maybe_value.has_value()) {
+ TRACE_BROKER_MISSING(broker(), "slot value " << access.index()
+ << " for context "
+ << concrete);
return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
@@ -207,9 +210,10 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
// Now walk up the concrete context chain for the remaining depth.
ContextRef concrete = maybe_concrete.value();
- concrete.SerializeContextChain(); // TODO(neis): Remove later.
- for (; depth > 0; --depth) {
- concrete = concrete.previous();
+ concrete = concrete.previous(&depth);
+ if (depth > 0) {
+ TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete);
+ return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
}
return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 8fc8dd1308..4e69db6b9b 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -837,7 +837,7 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
iterated_object, effect, control);
- // Create the JSArrayIterator result.
+ // Create the JSCollectionIterator result.
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(JSCollectionIterator::kSize, AllocationType::kYoung,
Type::OtherObject());
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index a3805ec125..43a4beadee 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -128,9 +128,17 @@ void JSGraph::GetCachedNodes(NodeVector* nodes) {
DEFINE_GETTER(AllocateInYoungGenerationStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), AllocateInYoungGeneration)))
+DEFINE_GETTER(AllocateRegularInYoungGenerationStubConstant,
+ HeapConstant(BUILTIN_CODE(isolate(),
+ AllocateRegularInYoungGeneration)))
+
DEFINE_GETTER(AllocateInOldGenerationStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), AllocateInOldGeneration)))
+DEFINE_GETTER(AllocateRegularInOldGenerationStubConstant,
+ HeapConstant(BUILTIN_CODE(isolate(),
+ AllocateRegularInOldGeneration)))
+
DEFINE_GETTER(ArrayConstructorStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), ArrayConstructorImpl)))
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index b5c80515ad..ec36c26034 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -80,31 +80,33 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
void GetCachedNodes(NodeVector* nodes);
// Cached global nodes.
-#define CACHED_GLOBAL_LIST(V) \
- V(AllocateInYoungGenerationStubConstant) \
- V(AllocateInOldGenerationStubConstant) \
- V(ArrayConstructorStubConstant) \
- V(BigIntMapConstant) \
- V(BooleanMapConstant) \
- V(ToNumberBuiltinConstant) \
- V(EmptyFixedArrayConstant) \
- V(EmptyStringConstant) \
- V(FixedArrayMapConstant) \
- V(PropertyArrayMapConstant) \
- V(FixedDoubleArrayMapConstant) \
- V(HeapNumberMapConstant) \
- V(OptimizedOutConstant) \
- V(StaleRegisterConstant) \
- V(UndefinedConstant) \
- V(TheHoleConstant) \
- V(TrueConstant) \
- V(FalseConstant) \
- V(NullConstant) \
- V(ZeroConstant) \
- V(OneConstant) \
- V(NaNConstant) \
- V(MinusOneConstant) \
- V(EmptyStateValues) \
+#define CACHED_GLOBAL_LIST(V) \
+ V(AllocateInYoungGenerationStubConstant) \
+ V(AllocateRegularInYoungGenerationStubConstant) \
+ V(AllocateInOldGenerationStubConstant) \
+ V(AllocateRegularInOldGenerationStubConstant) \
+ V(ArrayConstructorStubConstant) \
+ V(BigIntMapConstant) \
+ V(BooleanMapConstant) \
+ V(ToNumberBuiltinConstant) \
+ V(EmptyFixedArrayConstant) \
+ V(EmptyStringConstant) \
+ V(FixedArrayMapConstant) \
+ V(PropertyArrayMapConstant) \
+ V(FixedDoubleArrayMapConstant) \
+ V(HeapNumberMapConstant) \
+ V(OptimizedOutConstant) \
+ V(StaleRegisterConstant) \
+ V(UndefinedConstant) \
+ V(TheHoleConstant) \
+ V(TrueConstant) \
+ V(FalseConstant) \
+ V(NullConstant) \
+ V(ZeroConstant) \
+ V(OneConstant) \
+ V(NaNConstant) \
+ V(MinusOneConstant) \
+ V(EmptyStateValues) \
V(SingleDeadTypedStateValues)
// Cached global node accessor methods.
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 86250e9d1f..c79c793ae6 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/heap-refs.h"
#ifdef ENABLE_SLOW_DCHECKS
#include <algorithm>
@@ -12,6 +13,7 @@
#include "src/ast/modules.h"
#include "src/codegen/code-factory.h"
#include "src/compiler/access-info.h"
+#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/per-isolate-compiler-cache.h"
#include "src/compiler/vector-slot-pair.h"
@@ -26,6 +28,7 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/template-objects-inl.h"
#include "src/objects/templates.h"
#include "src/utils/boxed-float.h"
#include "src/utils/utils.h"
@@ -121,17 +124,31 @@ class PropertyCellData : public HeapObjectData {
ObjectData* value_ = nullptr;
};
+// TODO(mslekova): Once we have real-world usage data, we might want to
+// reimplement this as a sorted vector instead, to reduce the memory overhead.
+typedef ZoneMap<MapData*, HolderLookupResult> KnownReceiversMap;
+
class FunctionTemplateInfoData : public HeapObjectData {
public:
FunctionTemplateInfoData(JSHeapBroker* broker, ObjectData** storage,
Handle<FunctionTemplateInfo> object);
- void Serialize(JSHeapBroker* broker);
- ObjectData* call_code() const { return call_code_; }
+ bool is_signature_undefined() const { return is_signature_undefined_; }
+ bool accept_any_receiver() const { return accept_any_receiver_; }
+ bool has_call_code() const { return has_call_code_; }
+
+ void SerializeCallCode(JSHeapBroker* broker);
+ CallHandlerInfoData* call_code() const { return call_code_; }
+ KnownReceiversMap& known_receivers() { return known_receivers_; }
private:
- bool serialized_ = false;
- ObjectData* call_code_ = nullptr;
+ bool serialized_call_code_ = false;
+ CallHandlerInfoData* call_code_ = nullptr;
+ bool is_signature_undefined_ = false;
+ bool accept_any_receiver_ = false;
+ bool has_call_code_ = false;
+
+ KnownReceiversMap known_receivers_;
};
class CallHandlerInfoData : public HeapObjectData {
@@ -154,7 +171,16 @@ class CallHandlerInfoData : public HeapObjectData {
FunctionTemplateInfoData::FunctionTemplateInfoData(
JSHeapBroker* broker, ObjectData** storage,
Handle<FunctionTemplateInfo> object)
- : HeapObjectData(broker, storage, object) {}
+ : HeapObjectData(broker, storage, object),
+ known_receivers_(broker->zone()) {
+ auto function_template_info = Handle<FunctionTemplateInfo>::cast(object);
+ is_signature_undefined_ =
+ function_template_info->signature().IsUndefined(broker->isolate());
+ accept_any_receiver_ = function_template_info->accept_any_receiver();
+
+ CallOptimization call_optimization(broker->isolate(), object);
+ has_call_code_ = call_optimization.is_simple_api_call();
+}
CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
ObjectData** storage,
@@ -181,18 +207,17 @@ void PropertyCellData::Serialize(JSHeapBroker* broker) {
value_ = broker->GetOrCreateData(cell->value());
}
-void FunctionTemplateInfoData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
+void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
+ if (serialized_call_code_) return;
+ serialized_call_code_ = true;
- TraceScope tracer(broker, this, "FunctionTemplateInfoData::Serialize");
+ TraceScope tracer(broker, this,
+ "FunctionTemplateInfoData::SerializeCallCode");
auto function_template_info = Handle<FunctionTemplateInfo>::cast(object());
DCHECK_NULL(call_code_);
- call_code_ = broker->GetOrCreateData(function_template_info->call_code());
-
- if (call_code_->IsCallHandlerInfo()) {
- call_code_->AsCallHandlerInfo()->Serialize(broker);
- }
+ call_code_ = broker->GetOrCreateData(function_template_info->call_code())
+ ->AsCallHandlerInfo();
+ call_code_->Serialize(broker);
}
void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
@@ -231,6 +256,12 @@ class JSObjectField {
uint64_t number_bits_ = 0;
};
+struct FieldIndexHasher {
+ size_t operator()(FieldIndex field_index) const {
+ return field_index.index();
+ }
+};
+
class JSObjectData : public HeapObjectData {
public:
JSObjectData(JSHeapBroker* broker, ObjectData** storage,
@@ -253,12 +284,15 @@ class JSObjectData : public HeapObjectData {
ObjectData* GetOwnConstantElement(JSHeapBroker* broker, uint32_t index,
bool serialize);
+ ObjectData* GetOwnProperty(JSHeapBroker* broker,
+ Representation representation,
+ FieldIndex field_index, bool serialize);
// This method is only used to assert our invariants.
bool cow_or_empty_elements_tenured() const;
private:
- void SerializeRecursive(JSHeapBroker* broker, int max_depths);
+ void SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, int max_depths);
FixedArrayBaseData* elements_ = nullptr;
bool cow_or_empty_elements_tenured_ = false;
@@ -277,6 +311,12 @@ class JSObjectData : public HeapObjectData {
// non-configurable, or (2) are known not to (possibly they don't exist at
// all). In case (2), the second pair component is nullptr.
ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
+ // Properties that either:
+ // (1) are known to exist directly on the object, or
+ // (2) are known not to (possibly they don't exist at all).
+ // In case (2), the second pair component is nullptr.
+ // For simplicity, this may in theory overlap with inobject_fields_.
+ ZoneUnorderedMap<FieldIndex, ObjectData*, FieldIndexHasher> own_properties_;
};
void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
@@ -312,6 +352,15 @@ base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
}
return base::nullopt;
}
+
+ObjectRef GetOwnPropertyFromHeap(JSHeapBroker* broker,
+ Handle<JSObject> receiver,
+ Representation representation,
+ FieldIndex field_index) {
+ Handle<Object> constant =
+ JSObject::FastPropertyAt(receiver, representation, field_index);
+ return ObjectRef(broker, constant);
+}
} // namespace
ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
@@ -333,6 +382,27 @@ ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
return result;
}
+ObjectData* JSObjectData::GetOwnProperty(JSHeapBroker* broker,
+ Representation representation,
+ FieldIndex field_index,
+ bool serialize) {
+ auto p = own_properties_.find(field_index);
+ if (p != own_properties_.end()) return p->second;
+
+ if (!serialize) {
+ TRACE_MISSING(broker, "knowledge about property with index "
+ << field_index.property_index() << " on "
+ << this);
+ return nullptr;
+ }
+
+ ObjectRef property = GetOwnPropertyFromHeap(
+ broker, Handle<JSObject>::cast(object()), representation, field_index);
+ ObjectData* result(property.data());
+ own_properties_.insert(std::make_pair(field_index, result));
+ return result;
+}
+
class JSTypedArrayData : public JSObjectData {
public:
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -503,24 +573,18 @@ class ContextData : public HeapObjectData {
public:
ContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<Context> object);
- void SerializeContextChain(JSHeapBroker* broker);
- ContextData* previous() const {
- CHECK(serialized_context_chain_);
- return previous_;
- }
+ // {previous} will return the closest valid context possible to the desired
+ // {depth}, decrementing {depth} for each previous link successfully followed.
+ // If {serialize} is true, it will serialize contexts along the way.
+ ContextData* previous(JSHeapBroker* broker, size_t* depth, bool serialize);
- void SerializeSlot(JSHeapBroker* broker, int index);
-
- ObjectData* GetSlot(int index) {
- auto search = slots_.find(index);
- CHECK(search != slots_.end());
- return search->second;
- }
+ // Returns nullptr if the slot index isn't valid or wasn't serialized
+ // (unless {serialize} is true).
+ ObjectData* GetSlot(JSHeapBroker* broker, int index, bool serialize);
private:
ZoneMap<int, ObjectData*> slots_;
- bool serialized_context_chain_ = false;
ContextData* previous_ = nullptr;
};
@@ -528,28 +592,46 @@ ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<Context> object)
: HeapObjectData(broker, storage, object), slots_(broker->zone()) {}
-void ContextData::SerializeContextChain(JSHeapBroker* broker) {
- if (serialized_context_chain_) return;
- serialized_context_chain_ = true;
+ContextData* ContextData::previous(JSHeapBroker* broker, size_t* depth,
+ bool serialize) {
+ if (*depth == 0) return this;
- TraceScope tracer(broker, this, "ContextData::SerializeContextChain");
- Handle<Context> context = Handle<Context>::cast(object());
+ if (serialize && previous_ == nullptr) {
+ TraceScope tracer(broker, this, "ContextData::previous");
+ Handle<Context> context = Handle<Context>::cast(object());
+ Object prev = context->unchecked_previous();
+ if (prev.IsContext()) {
+ previous_ = broker->GetOrCreateData(prev)->AsContext();
+ }
+ }
- DCHECK_NULL(previous_);
- // Context::previous DCHECK-fails when called on the native context.
- if (!context->IsNativeContext()) {
- previous_ = broker->GetOrCreateData(context->previous())->AsContext();
- previous_->SerializeContextChain(broker);
+ if (previous_ != nullptr) {
+ *depth = *depth - 1;
+ return previous_->previous(broker, depth, serialize);
}
+ return this;
}
-void ContextData::SerializeSlot(JSHeapBroker* broker, int index) {
- TraceScope tracer(broker, this, "ContextData::SerializeSlot");
- TRACE(broker, "Serializing script context slot " << index);
- Handle<Context> context = Handle<Context>::cast(object());
- CHECK(index >= 0 && index < context->length());
- ObjectData* odata = broker->GetOrCreateData(context->get(index));
- slots_.insert(std::make_pair(index, odata));
+ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index,
+ bool serialize) {
+ CHECK_GE(index, 0);
+ auto search = slots_.find(index);
+ if (search != slots_.end()) {
+ return search->second;
+ }
+
+ if (serialize) {
+ Handle<Context> context = Handle<Context>::cast(object());
+ if (index < context->length()) {
+ TraceScope tracer(broker, this, "ContextData::GetSlot");
+ TRACE(broker, "Serializing context slot " << index);
+ ObjectData* odata = broker->GetOrCreateData(context->get(index));
+ slots_.insert(std::make_pair(index, odata));
+ return odata;
+ }
+ }
+
+ return nullptr;
}
class NativeContextData : public ContextData {
@@ -564,6 +646,11 @@ class NativeContextData : public ContextData {
return function_maps_;
}
+ ScopeInfoData* scope_info() const {
+ CHECK(serialized_);
+ return scope_info_;
+ }
+
NativeContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<NativeContext> object);
void Serialize(JSHeapBroker* broker);
@@ -574,6 +661,7 @@ class NativeContextData : public ContextData {
BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
ZoneVector<MapData*> function_maps_;
+ ScopeInfoData* scope_info_ = nullptr;
};
class NameData : public HeapObjectData {
@@ -674,14 +762,15 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
DCHECK_GE(max_depth, 0);
DCHECK_GE(*max_properties, 0);
+ Isolate* const isolate = boilerplate->GetIsolate();
+
// Make sure the boilerplate map is not deprecated.
- if (!JSObject::TryMigrateInstance(boilerplate)) return false;
+ if (!JSObject::TryMigrateInstance(isolate, boilerplate)) return false;
// Check for too deep nesting.
if (max_depth == 0) return false;
// Check the elements.
- Isolate* const isolate = boilerplate->GetIsolate();
Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
if (elements->length() > 0 &&
elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
@@ -780,6 +869,18 @@ class AllocationSiteData : public HeapObjectData {
bool serialized_boilerplate_ = false;
};
+class BigIntData : public HeapObjectData {
+ public:
+ BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object)
+ : HeapObjectData(broker, storage, object),
+ as_uint64_(object->AsUint64(nullptr)) {}
+
+ uint64_t AsUint64() const { return as_uint64_; }
+
+ private:
+ const uint64_t as_uint64_;
+};
+
// Only used in JSNativeContextSpecialization.
class ScriptContextTableData : public HeapObjectData {
public:
@@ -1215,7 +1316,8 @@ JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSObject> object)
: HeapObjectData(broker, storage, object),
inobject_fields_(broker->zone()),
- own_constant_elements_(broker->zone()) {}
+ own_constant_elements_(broker->zone()),
+ own_properties_(broker->zone()) {}
FixedArrayData::FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<FixedArray> object)
@@ -1282,18 +1384,106 @@ class BytecodeArrayData : public FixedArrayBaseData {
return incoming_new_target_or_generator_register_;
}
+ uint8_t get(int index) const {
+ DCHECK(is_serialized_for_compilation_);
+ return bytecodes_[index];
+ }
+
+ Address GetFirstBytecodeAddress() const {
+ return reinterpret_cast<Address>(bytecodes_.data());
+ }
+
+ Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const {
+ return constant_pool_[index]->object();
+ }
+
+ bool IsConstantAtIndexSmi(int index) const {
+ return constant_pool_[index]->is_smi();
+ }
+
+ Smi GetConstantAtIndexAsSmi(int index) const {
+ return *(Handle<Smi>::cast(constant_pool_[index]->object()));
+ }
+
+ bool IsSerializedForCompilation() const {
+ return is_serialized_for_compilation_;
+ }
+
+ void SerializeForCompilation(JSHeapBroker* broker) {
+ if (is_serialized_for_compilation_) return;
+
+ Handle<BytecodeArray> bytecode_array =
+ Handle<BytecodeArray>::cast(object());
+
+ DCHECK(bytecodes_.empty());
+ bytecodes_.reserve(bytecode_array->length());
+ for (int i = 0; i < bytecode_array->length(); i++) {
+ bytecodes_.push_back(bytecode_array->get(i));
+ }
+
+ DCHECK(constant_pool_.empty());
+ Handle<FixedArray> constant_pool(bytecode_array->constant_pool(),
+ broker->isolate());
+ constant_pool_.reserve(constant_pool->length());
+ for (int i = 0; i < constant_pool->length(); i++) {
+ constant_pool_.push_back(broker->GetOrCreateData(constant_pool->get(i)));
+ }
+
+ Handle<ByteArray> source_position_table(
+ bytecode_array->SourcePositionTableIfCollected(), broker->isolate());
+ source_positions_.reserve(source_position_table->length());
+ for (int i = 0; i < source_position_table->length(); i++) {
+ source_positions_.push_back(source_position_table->get(i));
+ }
+
+ Handle<ByteArray> handlers(bytecode_array->handler_table(),
+ broker->isolate());
+ handler_table_.reserve(handlers->length());
+ for (int i = 0; i < handlers->length(); i++) {
+ handler_table_.push_back(handlers->get(i));
+ }
+
+ is_serialized_for_compilation_ = true;
+ }
+
+ const byte* source_positions_address() const {
+ return source_positions_.data();
+ }
+
+ size_t source_positions_size() const { return source_positions_.size(); }
+
+ Address handler_table_address() const {
+ CHECK(is_serialized_for_compilation_);
+ return reinterpret_cast<Address>(handler_table_.data());
+ }
+
+ int handler_table_size() const {
+ CHECK(is_serialized_for_compilation_);
+ return static_cast<int>(handler_table_.size());
+ }
+
BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<BytecodeArray> object)
: FixedArrayBaseData(broker, storage, object),
register_count_(object->register_count()),
parameter_count_(object->parameter_count()),
incoming_new_target_or_generator_register_(
- object->incoming_new_target_or_generator_register()) {}
+ object->incoming_new_target_or_generator_register()),
+ bytecodes_(broker->zone()),
+ source_positions_(broker->zone()),
+ handler_table_(broker->zone()),
+ constant_pool_(broker->zone()) {}
private:
int const register_count_;
int const parameter_count_;
interpreter::Register const incoming_new_target_or_generator_register_;
+
+ bool is_serialized_for_compilation_ = false;
+ ZoneVector<uint8_t> bytecodes_;
+ ZoneVector<uint8_t> source_positions_;
+ ZoneVector<uint8_t> handler_table_;
+ ZoneVector<ObjectData*> constant_pool_;
};
class JSArrayData : public JSObjectData {
@@ -1377,6 +1567,22 @@ class SharedFunctionInfoData : public HeapObjectData {
void SetSerializedForCompilation(JSHeapBroker* broker,
FeedbackVectorRef feedback);
bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
+ void SerializeFunctionTemplateInfo(JSHeapBroker* broker);
+ FunctionTemplateInfoData* function_template_info() const {
+ return function_template_info_;
+ }
+ JSArrayData* GetTemplateObject(FeedbackSlot slot) const {
+ auto lookup_it = template_objects_.find(slot.ToInt());
+ if (lookup_it != template_objects_.cend()) {
+ return lookup_it->second;
+ }
+ return nullptr;
+ }
+ void SetTemplateObject(FeedbackSlot slot, JSArrayData* object) {
+ CHECK(
+ template_objects_.insert(std::make_pair(slot.ToInt(), object)).second);
+ }
+
#define DECL_ACCESSOR(type, name) \
type name() const { return name##_; }
BROKER_SFI_FIELDS(DECL_ACCESSOR)
@@ -1391,6 +1597,8 @@ class SharedFunctionInfoData : public HeapObjectData {
#define DECL_MEMBER(type, name) type const name##_;
BROKER_SFI_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
+ FunctionTemplateInfoData* function_template_info_;
+ ZoneMap<int, JSArrayData*> template_objects_;
};
SharedFunctionInfoData::SharedFunctionInfoData(
@@ -1408,7 +1616,9 @@ SharedFunctionInfoData::SharedFunctionInfoData(
#define INIT_MEMBER(type, name) , name##_(object->name())
BROKER_SFI_FIELDS(INIT_MEMBER)
#undef INIT_MEMBER
-{
+ ,
+ function_template_info_(nullptr),
+ template_objects_(broker->zone()) {
DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId);
DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr);
}
@@ -1420,15 +1630,28 @@ void SharedFunctionInfoData::SetSerializedForCompilation(
<< " as serialized for compilation");
}
+void SharedFunctionInfoData::SerializeFunctionTemplateInfo(
+ JSHeapBroker* broker) {
+ if (function_template_info_) return;
+
+ function_template_info_ =
+ broker
+ ->GetOrCreateData(handle(
+ Handle<SharedFunctionInfo>::cast(object())->function_data(),
+ broker->isolate()))
+ ->AsFunctionTemplateInfo();
+}
+
bool SharedFunctionInfoData::IsSerializedForCompilation(
FeedbackVectorRef feedback) const {
return serialized_for_compilation_.find(feedback.object()) !=
serialized_for_compilation_.end();
}
-class ModuleData : public HeapObjectData {
+class SourceTextModuleData : public HeapObjectData {
public:
- ModuleData(JSHeapBroker* broker, ObjectData** storage, Handle<Module> object);
+ SourceTextModuleData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<SourceTextModule> object);
void Serialize(JSHeapBroker* broker);
CellData* GetCell(int cell_index) const;
@@ -1439,35 +1662,36 @@ class ModuleData : public HeapObjectData {
ZoneVector<CellData*> exports_;
};
-ModuleData::ModuleData(JSHeapBroker* broker, ObjectData** storage,
- Handle<Module> object)
+SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<SourceTextModule> object)
: HeapObjectData(broker, storage, object),
imports_(broker->zone()),
exports_(broker->zone()) {}
-CellData* ModuleData::GetCell(int cell_index) const {
+CellData* SourceTextModuleData::GetCell(int cell_index) const {
CHECK(serialized_);
CellData* cell;
- switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
- case ModuleDescriptor::kImport:
- cell = imports_.at(Module::ImportIndex(cell_index));
+ switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
+ case SourceTextModuleDescriptor::kImport:
+ cell = imports_.at(SourceTextModule::ImportIndex(cell_index));
break;
- case ModuleDescriptor::kExport:
- cell = exports_.at(Module::ExportIndex(cell_index));
+ case SourceTextModuleDescriptor::kExport:
+ cell = exports_.at(SourceTextModule::ExportIndex(cell_index));
break;
- case ModuleDescriptor::kInvalid:
+ case SourceTextModuleDescriptor::kInvalid:
UNREACHABLE();
}
CHECK_NOT_NULL(cell);
return cell;
}
-void ModuleData::Serialize(JSHeapBroker* broker) {
+void SourceTextModuleData::Serialize(JSHeapBroker* broker) {
if (serialized_) return;
serialized_ = true;
- TraceScope tracer(broker, this, "ModuleData::Serialize");
- Handle<Module> module = Handle<Module>::cast(object());
+ TraceScope tracer(broker, this, "SourceTextModuleData::Serialize");
+ Handle<SourceTextModule> module = Handle<SourceTextModule>::cast(object());
// TODO(neis): We could be smarter and only serialize the cells we care about.
// TODO(neis): Define a helper for serializing a FixedArray into a ZoneVector.
@@ -1614,7 +1838,7 @@ bool JSObjectData::cow_or_empty_elements_tenured() const {
FixedArrayBaseData* JSObjectData::elements() const { return elements_; }
void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) {
- SerializeRecursive(broker, kMaxFastLiteralDepth);
+ SerializeRecursiveAsBoilerplate(broker, kMaxFastLiteralDepth);
}
void JSObjectData::SerializeElements(JSHeapBroker* broker) {
@@ -1717,11 +1941,13 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
<< contents.size() << " total)");
}
-void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
+void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
+ int depth) {
if (serialized_as_boilerplate_) return;
serialized_as_boilerplate_ = true;
- TraceScope tracer(broker, this, "JSObjectData::SerializeRecursive");
+ TraceScope tracer(broker, this,
+ "JSObjectData::SerializeRecursiveAsBoilerplate");
Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
// We only serialize boilerplates that pass the IsInlinableFastLiteral
@@ -1767,7 +1993,8 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
Handle<Object> value(fast_elements->get(i), isolate);
if (value->IsJSObject()) {
ObjectData* value_data = broker->GetOrCreateData(value);
- value_data->AsJSObject()->SerializeRecursive(broker, depth - 1);
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
}
}
} else {
@@ -1802,9 +2029,22 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
} else {
Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
isolate);
+ // In case of unboxed double fields we use a sentinel NaN value to mark
+ // uninitialized fields. A boilerplate value with such a field may migrate
+ // from its unboxed double to a tagged representation. In the process the
+ // raw double is converted to a heap number. The sentinel value carries no
+ // special meaning when it occurs in a heap number, so we would like to
+ // recover the uninitialized value.
+ // We check for the sentinel here, specifically, since migrations might
+ // have been triggered as part of boilerplate serialization.
+ if (value->IsHeapNumber() &&
+ HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
+ value = isolate->factory()->uninitialized_value();
+ }
ObjectData* value_data = broker->GetOrCreateData(value);
if (value->IsJSObject()) {
- value_data->AsJSObject()->SerializeRecursive(broker, depth - 1);
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
}
inobject_fields_.push_back(JSObjectField{value_data});
}
@@ -1839,35 +2079,50 @@ bool ObjectRef::equals(const ObjectRef& other) const {
Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
-ContextRef ContextRef::previous() const {
+ContextRef ContextRef::previous(size_t* depth, bool serialize) const {
+ DCHECK_NOT_NULL(depth);
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
- return ContextRef(broker(),
- handle(object()->previous(), broker()->isolate()));
+ Context current = *object();
+ while (*depth != 0 && current.unchecked_previous().IsContext()) {
+ current = Context::cast(current.unchecked_previous());
+ (*depth)--;
+ }
+ return ContextRef(broker(), handle(current, broker()->isolate()));
}
- return ContextRef(broker(), data()->AsContext()->previous());
+ ContextData* current = this->data()->AsContext();
+ return ContextRef(broker(), current->previous(broker(), depth, serialize));
}
-// Not needed for TypedLowering.
-ObjectRef ContextRef::get(int index) const {
+base::Optional<ObjectRef> ContextRef::get(int index, bool serialize) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
Handle<Object> value(object()->get(index), broker()->isolate());
return ObjectRef(broker(), value);
}
- return ObjectRef(broker(), data()->AsContext()->GetSlot(index));
+ ObjectData* optional_slot =
+ data()->AsContext()->GetSlot(broker(), index, serialize);
+ if (optional_slot != nullptr) {
+ return ObjectRef(broker(), optional_slot);
+ }
+ return base::nullopt;
}
-JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone)
+JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
+ bool tracing_enabled)
: isolate_(isolate),
broker_zone_(broker_zone),
current_zone_(broker_zone),
refs_(new (zone())
RefsMap(kMinimalRefsBucketCount, AddressMatcher(), zone())),
array_and_object_prototypes_(zone()),
- feedback_(zone()) {
+ tracing_enabled_(tracing_enabled),
+ feedback_(zone()),
+ bytecode_analyses_(zone()),
+ ais_for_loading_then_(zone()),
+ ais_for_loading_exec_(zone()) {
// Note that this initialization of the refs_ pointer with the minimal
// initial capacity is redundant in the normal use case (concurrent
// compilation enabled, standard objects to be serialized), as the map
@@ -1939,7 +2194,9 @@ void JSHeapBroker::SerializeShareableObjects() {
{
Builtins::Name builtins[] = {
Builtins::kAllocateInYoungGeneration,
+ Builtins::kAllocateRegularInYoungGeneration,
Builtins::kAllocateInOldGeneration,
+ Builtins::kAllocateRegularInOldGeneration,
Builtins::kArgumentsAdaptorTrampoline,
Builtins::kArrayConstructorImpl,
Builtins::kCallFunctionForwardVarargs,
@@ -2400,6 +2657,11 @@ bool AllocationSiteRef::IsFastLiteral() const {
return data()->AsAllocationSite()->IsFastLiteral();
}
+void JSObjectRef::SerializeElements() {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSObject()->SerializeElements(broker());
+}
+
void JSObjectRef::EnsureElementsTenured() {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation allow_handle_allocation;
@@ -2553,6 +2815,95 @@ double FixedDoubleArrayRef::get_scalar(int i) const {
return data()->AsFixedDoubleArray()->Get(i).get_scalar();
}
+uint8_t BytecodeArrayRef::get(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return object()->get(index);
+ }
+ return data()->AsBytecodeArray()->get(index);
+}
+
+Address BytecodeArrayRef::GetFirstBytecodeAddress() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return object()->GetFirstBytecodeAddress();
+ }
+ return data()->AsBytecodeArray()->GetFirstBytecodeAddress();
+}
+
+Handle<Object> BytecodeArrayRef::GetConstantAtIndex(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return handle(object()->constant_pool().get(index), broker()->isolate());
+ }
+ return data()->AsBytecodeArray()->GetConstantAtIndex(index,
+ broker()->isolate());
+}
+
+bool BytecodeArrayRef::IsConstantAtIndexSmi(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return object()->constant_pool().get(index).IsSmi();
+ }
+ return data()->AsBytecodeArray()->IsConstantAtIndexSmi(index);
+}
+
+Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return Smi::cast(object()->constant_pool().get(index));
+ }
+ return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index);
+}
+
+bool BytecodeArrayRef::IsSerializedForCompilation() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return true;
+ return data()->AsBytecodeArray()->IsSerializedForCompilation();
+}
+
+void BytecodeArrayRef::SerializeForCompilation() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ data()->AsBytecodeArray()->SerializeForCompilation(broker());
+}
+
+const byte* BytecodeArrayRef::source_positions_address() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object()->SourcePositionTableIfCollected().GetDataStartAddress();
+ }
+ return data()->AsBytecodeArray()->source_positions_address();
+}
+
+int BytecodeArrayRef::source_positions_size() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object()->SourcePositionTableIfCollected().length();
+ }
+ return static_cast<int>(data()->AsBytecodeArray()->source_positions_size());
+}
+
+Address BytecodeArrayRef::handler_table_address() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return reinterpret_cast<Address>(
+ object()->handler_table().GetDataStartAddress());
+ }
+ return data()->AsBytecodeArray()->handler_table_address();
+}
+
+int BytecodeArrayRef::handler_table_size() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object()->handler_table().length();
+ }
+ return data()->AsBytecodeArray()->handler_table_size();
+}
+
#define IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name) \
if (broker()->mode() == JSHeapBroker::kDisabled) { \
AllowHandleAllocation handle_allocation; \
@@ -2630,15 +2981,13 @@ BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::ElementsKindBits)
-BIMODAL_ACCESSOR_B(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
-BIMODAL_ACCESSOR_B(Map, bit_field2, has_hidden_prototype,
- Map::HasHiddenPrototypeBit)
-BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map, Map::IsDictionaryMapBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
Map::NumberOfOwnDescriptorsBits)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_migration_target,
Map::IsMigrationTargetBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_extensible, Map::IsExtensibleBit)
BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot, Map::HasPrototypeSlotBit)
BIMODAL_ACCESSOR_B(Map, bit_field, is_access_check_needed,
Map::IsAccessCheckNeededBit)
@@ -2663,7 +3012,109 @@ BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
BIMODAL_ACCESSOR(PropertyCell, Object, value)
BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
-BIMODAL_ACCESSOR(FunctionTemplateInfo, Object, call_code)
+base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return CallHandlerInfoRef(
+ broker(), handle(object()->call_code(), broker()->isolate()));
+ }
+ CallHandlerInfoData* call_code =
+ data()->AsFunctionTemplateInfo()->call_code();
+ if (!call_code) return base::nullopt;
+ return CallHandlerInfoRef(broker(), call_code);
+}
+
+bool FunctionTemplateInfoRef::is_signature_undefined() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
+ return object()->signature().IsUndefined(broker()->isolate());
+ }
+ return data()->AsFunctionTemplateInfo()->is_signature_undefined();
+}
+
+bool FunctionTemplateInfoRef::has_call_code() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
+ CallOptimization call_optimization(broker()->isolate(), object());
+ return call_optimization.is_simple_api_call();
+ }
+ return data()->AsFunctionTemplateInfo()->has_call_code();
+}
+
+BIMODAL_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
+
+HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
+ MapRef receiver_map, bool serialize) {
+ const HolderLookupResult not_found;
+
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
+ CallOptimization call_optimization(broker()->isolate(), object());
+ Handle<Map> receiver_map_ref(receiver_map.object());
+ if (!receiver_map_ref->IsJSReceiverMap() ||
+ (receiver_map_ref->is_access_check_needed() &&
+ !object()->accept_any_receiver())) {
+ return not_found;
+ }
+
+ HolderLookupResult result;
+ Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
+ receiver_map_ref, &result.lookup);
+
+ switch (result.lookup) {
+ case CallOptimization::kHolderFound:
+ result.holder = JSObjectRef(broker(), holder);
+ break;
+ default:
+ DCHECK_EQ(result.holder, base::nullopt);
+ break;
+ }
+ return result;
+ }
+
+ FunctionTemplateInfoData* fti_data = data()->AsFunctionTemplateInfo();
+ KnownReceiversMap::iterator lookup_it =
+ fti_data->known_receivers().find(receiver_map.data()->AsMap());
+ if (lookup_it != fti_data->known_receivers().cend()) {
+ return lookup_it->second;
+ }
+ if (!serialize) {
+ TRACE_BROKER_MISSING(broker(),
+ "holder for receiver with map " << receiver_map);
+ return not_found;
+ }
+ if (!receiver_map.IsJSReceiverMap() ||
+ (receiver_map.is_access_check_needed() && !accept_any_receiver())) {
+ fti_data->known_receivers().insert(
+ {receiver_map.data()->AsMap(), not_found});
+ return not_found;
+ }
+
+ HolderLookupResult result;
+ CallOptimization call_optimization(broker()->isolate(), object());
+ Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
+ receiver_map.object(), &result.lookup);
+
+ switch (result.lookup) {
+ case CallOptimization::kHolderFound: {
+ result.holder = JSObjectRef(broker(), holder);
+ fti_data->known_receivers().insert(
+ {receiver_map.data()->AsMap(), result});
+ break;
+ }
+ default: {
+ DCHECK_EQ(result.holder, base::nullopt);
+ fti_data->known_receivers().insert(
+ {receiver_map.data()->AsMap(), result});
+ }
+ }
+ return result;
+}
BIMODAL_ACCESSOR(CallHandlerInfo, Object, data)
@@ -2746,11 +3197,21 @@ bool StringRef::IsSeqString() const {
return data()->AsString()->is_seq_string();
}
+ScopeInfoRef NativeContextRef::scope_info() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ return ScopeInfoRef(broker(),
+ handle(object()->scope_info(), broker()->isolate()));
+ }
+ return ScopeInfoRef(broker(), data()->AsNativeContext()->scope_info());
+}
+
MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX);
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
if (broker()->mode() == JSHeapBroker::kDisabled) {
- return get(index).AsMap();
+ return get(index).value().AsMap();
}
return MapRef(broker(), data()->AsNativeContext()->function_maps().at(
index - Context::FIRST_FUNCTION_MAP_INDEX));
@@ -2853,6 +3314,19 @@ base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
return ObjectRef(broker(), element);
}
+base::Optional<ObjectRef> JSObjectRef::GetOwnProperty(
+ Representation field_representation, FieldIndex index,
+ bool serialize) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return GetOwnPropertyFromHeap(broker(), Handle<JSObject>::cast(object()),
+ field_representation, index);
+ }
+ ObjectData* property = data()->AsJSObject()->GetOwnProperty(
+ broker(), field_representation, index, serialize);
+ if (property == nullptr) return base::nullopt;
+ return ObjectRef(broker(), property);
+}
+
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(uint32_t index,
bool serialize) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
@@ -2884,14 +3358,19 @@ double MutableHeapNumberRef::value() const {
return data()->AsMutableHeapNumber()->value();
}
-CellRef ModuleRef::GetCell(int cell_index) const {
+uint64_t BigIntRef::AsUint64() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(BigInt, AsUint64);
+ return data()->AsBigInt()->AsUint64();
+}
+
+CellRef SourceTextModuleRef::GetCell(int cell_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
return CellRef(broker(),
handle(object()->GetCell(cell_index), broker()->isolate()));
}
- return CellRef(broker(), data()->AsModule()->GetCell(cell_index));
+ return CellRef(broker(), data()->AsSourceTextModule()->GetCell(cell_index));
}
ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
@@ -3108,6 +3587,8 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
for (int i = first; i <= last; ++i) {
function_maps_.push_back(broker->GetOrCreateData(context->get(i))->AsMap());
}
+
+ scope_info_ = broker->GetOrCreateData(context->scope_info())->AsScopeInfo();
}
void JSFunctionRef::Serialize() {
@@ -3133,6 +3614,46 @@ bool JSFunctionRef::IsSerializedForCompilation() const {
shared().IsSerializedForCompilation(feedback_vector());
}
+JSArrayRef SharedFunctionInfoRef::GetTemplateObject(ObjectRef description,
+ FeedbackVectorRef vector,
+ FeedbackSlot slot,
+ bool serialize) {
+ // Look in the feedback vector for the array. A Smi indicates that it's
+ // not yet cached here.
+ ObjectRef candidate = vector.get(slot);
+ if (!candidate.IsSmi()) {
+ return candidate.AsJSArray();
+ }
+
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ Handle<TemplateObjectDescription> tod =
+ Handle<TemplateObjectDescription>::cast(description.object());
+ Handle<JSArray> template_object =
+ TemplateObjectDescription::GetTemplateObject(
+ broker()->isolate(), broker()->native_context().object(), tod,
+ object(), slot.ToInt());
+ return JSArrayRef(broker(), template_object);
+ }
+
+ JSArrayData* array = data()->AsSharedFunctionInfo()->GetTemplateObject(slot);
+ if (array != nullptr) return JSArrayRef(broker(), array);
+
+ CHECK(serialize);
+ CHECK(broker()->SerializingAllowed());
+
+ Handle<TemplateObjectDescription> tod =
+ Handle<TemplateObjectDescription>::cast(description.object());
+ Handle<JSArray> template_object =
+ TemplateObjectDescription::GetTemplateObject(
+ broker()->isolate(), broker()->native_context().object(), tod,
+ object(), slot.ToInt());
+ array = broker()->GetOrCreateData(template_object)->AsJSArray();
+ data()->AsSharedFunctionInfo()->SetTemplateObject(slot, array);
+ return JSArrayRef(broker(), array);
+}
+
void SharedFunctionInfoRef::SetSerializedForCompilation(
FeedbackVectorRef feedback) {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -3140,9 +3661,27 @@ void SharedFunctionInfoRef::SetSerializedForCompilation(
feedback);
}
+void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+
+ data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker());
+}
+
+base::Optional<FunctionTemplateInfoRef>
+SharedFunctionInfoRef::function_template_info() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return FunctionTemplateInfoRef(
+ broker(), handle(object()->function_data(), broker()->isolate()));
+ }
+ FunctionTemplateInfoData* function_template_info =
+ data()->AsSharedFunctionInfo()->function_template_info();
+ if (!function_template_info) return base::nullopt;
+ return FunctionTemplateInfoRef(broker(), function_template_info);
+}
+
bool SharedFunctionInfoRef::IsSerializedForCompilation(
FeedbackVectorRef feedback) const {
- CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
+ if (broker()->mode() == JSHeapBroker::kDisabled) return true;
return data()->AsSharedFunctionInfo()->IsSerializedForCompilation(feedback);
}
@@ -3181,22 +3720,10 @@ bool MapRef::serialized_prototype() const {
return data()->AsMap()->serialized_prototype();
}
-void ModuleRef::Serialize() {
+void SourceTextModuleRef::Serialize() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsModule()->Serialize(broker());
-}
-
-void ContextRef::SerializeContextChain() {
- if (broker()->mode() == JSHeapBroker::kDisabled) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsContext()->SerializeContextChain(broker());
-}
-
-void ContextRef::SerializeSlot(int index) {
- if (broker()->mode() == JSHeapBroker::kDisabled) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsContext()->SerializeSlot(broker(), index);
+ data()->AsSourceTextModule()->Serialize(broker());
}
void NativeContextRef::Serialize() {
@@ -3228,10 +3755,10 @@ void PropertyCellRef::Serialize() {
data()->AsPropertyCell()->Serialize(broker());
}
-void FunctionTemplateInfoRef::Serialize() {
+void FunctionTemplateInfoRef::SerializeCallCode() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsFunctionTemplateInfo()->Serialize(broker());
+ data()->AsFunctionTemplateInfo()->SerializeCallCode(broker());
}
base::Optional<PropertyCellRef> JSGlobalProxyRef::GetPropertyCell(
@@ -3307,10 +3834,67 @@ base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const {
return {};
}
-ElementAccessFeedback::ElementAccessFeedback(Zone* zone)
+KeyedAccessMode KeyedAccessMode::FromNexus(FeedbackNexus const& nexus) {
+ if (IsKeyedLoadICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kLoad, nexus.GetKeyedAccessLoadMode());
+ }
+ if (IsKeyedHasICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kHas, nexus.GetKeyedAccessLoadMode());
+ }
+ if (IsKeyedStoreICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kStore, nexus.GetKeyedAccessStoreMode());
+ }
+ if (IsStoreInArrayLiteralICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kStoreInLiteral,
+ nexus.GetKeyedAccessStoreMode());
+ }
+ UNREACHABLE();
+}
+
+AccessMode KeyedAccessMode::access_mode() const { return access_mode_; }
+
+bool KeyedAccessMode::IsLoad() const {
+ return access_mode_ == AccessMode::kLoad || access_mode_ == AccessMode::kHas;
+}
+bool KeyedAccessMode::IsStore() const {
+ return access_mode_ == AccessMode::kStore ||
+ access_mode_ == AccessMode::kStoreInLiteral;
+}
+
+KeyedAccessLoadMode KeyedAccessMode::load_mode() const {
+ CHECK(IsLoad());
+ return load_store_mode_.load_mode;
+}
+
+KeyedAccessStoreMode KeyedAccessMode::store_mode() const {
+ CHECK(IsStore());
+ return load_store_mode_.store_mode;
+}
+
+KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessLoadMode load_mode)
+ : load_mode(load_mode) {}
+KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessStoreMode store_mode)
+ : store_mode(store_mode) {}
+
+KeyedAccessMode::KeyedAccessMode(AccessMode access_mode,
+ KeyedAccessLoadMode load_mode)
+ : access_mode_(access_mode), load_store_mode_(load_mode) {
+ CHECK(!IsStore());
+ CHECK(IsLoad());
+}
+KeyedAccessMode::KeyedAccessMode(AccessMode access_mode,
+ KeyedAccessStoreMode store_mode)
+ : access_mode_(access_mode), load_store_mode_(store_mode) {
+ CHECK(!IsLoad());
+ CHECK(IsStore());
+}
+
+ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
+ KeyedAccessMode const& keyed_mode)
: ProcessedFeedback(kElementAccess),
receiver_maps(zone),
- transitions(zone) {}
+ transitions(zone),
+ keyed_mode(keyed_mode) {}
ElementAccessFeedback::MapIterator::MapIterator(
ElementAccessFeedback const& processed, JSHeapBroker* broker)
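The KeyedAccessMode introduced above stores either a load or a store sub-mode next to the access kind and guards the accessors with CHECKs. A stand-alone sketch of that tagged-union shape (hypothetical enum values, not part of the patch):

    #include <cassert>

    enum class Access { kLoad, kStore };
    enum class LoadMode { kStandard, kHandleOutOfBounds };
    enum class StoreMode { kStandard, kGrowAndHandleCow };

    // One discriminator plus a union; each accessor asserts that the matching
    // half of the union is the one being read, mirroring the CHECKs in
    // KeyedAccessMode::load_mode() and store_mode().
    class Mode {
     public:
      explicit Mode(LoadMode m) : access_(Access::kLoad) { u_.load = m; }
      explicit Mode(StoreMode m) : access_(Access::kStore) { u_.store = m; }
      LoadMode load_mode() const {
        assert(access_ == Access::kLoad);
        return u_.load;
      }
      StoreMode store_mode() const {
        assert(access_ == Access::kStore);
        return u_.store;
      }
     private:
      Access access_;
      union {
        LoadMode load;
        StoreMode store;
      } u_;
    };
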
@@ -3383,7 +3967,7 @@ GlobalAccessFeedback const* JSHeapBroker::GetGlobalAccessFeedback(
}
ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
- MapHandles const& maps) {
+ MapHandles const& maps, KeyedAccessMode const& keyed_mode) {
DCHECK(!maps.empty());
// Collect possible transition targets.
@@ -3397,7 +3981,8 @@ ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
}
}
- ElementAccessFeedback* result = new (zone()) ElementAccessFeedback(zone());
+ ElementAccessFeedback* result =
+ new (zone()) ElementAccessFeedback(zone(), keyed_mode);
// Separate the actual receiver maps and the possible transition sources.
for (Handle<Map> map : maps) {
@@ -3464,7 +4049,7 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess(
}
ContextRef context_ref(this, context);
if (immutable) {
- context_ref.SerializeSlot(context_slot_index);
+ context_ref.get(context_slot_index, true);
}
return new (zone())
GlobalAccessFeedback(context_ref, context_slot_index, immutable);
@@ -3489,6 +4074,54 @@ base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
return NameRef(this, handle(raw_name, isolate()));
}
+PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingThen(MapRef map) {
+ auto access_info = ais_for_loading_then_.find(map);
+ if (access_info == ais_for_loading_then_.end()) {
+ TRACE_BROKER_MISSING(
+ this, "access info for reducing JSResolvePromise with map " << map);
+ return PropertyAccessInfo::Invalid(zone());
+ }
+ return access_info->second;
+}
+
+void JSHeapBroker::CreateAccessInfoForLoadingThen(
+ MapRef map, CompilationDependencies* dependencies) {
+ auto access_info = ais_for_loading_then_.find(map);
+ if (access_info == ais_for_loading_then_.end()) {
+ AccessInfoFactory access_info_factory(this, dependencies, zone());
+ Handle<Name> then_string = isolate()->factory()->then_string();
+ ais_for_loading_then_.insert(
+ std::make_pair(map, access_info_factory.ComputePropertyAccessInfo(
+ map.object(), then_string, AccessMode::kLoad)));
+ }
+}
+
+PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingExec(MapRef map) {
+ auto access_info = ais_for_loading_exec_.find(map);
+ if (access_info == ais_for_loading_exec_.end()) {
+ TRACE_BROKER_MISSING(this,
+ "access info for property 'exec' on map " << map);
+ return PropertyAccessInfo::Invalid(zone());
+ }
+ return access_info->second;
+}
+
+PropertyAccessInfo const& JSHeapBroker::CreateAccessInfoForLoadingExec(
+ MapRef map, CompilationDependencies* dependencies) {
+ auto access_info = ais_for_loading_exec_.find(map);
+ if (access_info != ais_for_loading_exec_.end()) {
+ return access_info->second;
+ }
+
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ AccessInfoFactory access_info_factory(this, dependencies, zone());
+ PropertyAccessInfo ai_exec = access_info_factory.ComputePropertyAccessInfo(
+ map.object(), isolate()->factory()->exec_string(), AccessMode::kLoad);
+
+ auto inserted_ai = ais_for_loading_exec_.insert(std::make_pair(map, ai_exec));
+ return inserted_ai.first->second;
+}
+
ElementAccessFeedback const* ProcessedFeedback::AsElementAccess() const {
CHECK_EQ(kElementAccess, kind());
return static_cast<ElementAccessFeedback const*>(this);
@@ -3499,6 +4132,66 @@ NamedAccessFeedback const* ProcessedFeedback::AsNamedAccess() const {
return static_cast<NamedAccessFeedback const*>(this);
}
+BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
+ Handle<BytecodeArray> bytecode_array, BailoutId osr_bailout_id,
+ bool analyze_liveness, bool serialize) {
+ ObjectData* bytecode_array_data = GetData(bytecode_array);
+ CHECK_NOT_NULL(bytecode_array_data);
+
+ auto it = bytecode_analyses_.find(bytecode_array_data);
+ if (it != bytecode_analyses_.end()) {
+ // Bytecode analysis can be run for OSR or for non-OSR. In the rare case
+ // where we optimize for OSR and consider the top-level function itself for
+ // inlining (because of recursion), we need both the OSR and the non-OSR
+ // analysis. Fortunately, the only difference between the two lies in
+ // whether the OSR entry offset gets computed (from the OSR bailout id).
+ // Hence it's okay to reuse the OSR-version when asked for the non-OSR
+ // version, such that we need to store at most one analysis result per
+ // bytecode array.
+ CHECK_IMPLIES(osr_bailout_id != it->second->osr_bailout_id(),
+ osr_bailout_id.IsNone());
+ CHECK_EQ(analyze_liveness, it->second->liveness_analyzed());
+ return *it->second;
+ }
+
+ CHECK(serialize);
+ BytecodeAnalysis* analysis = new (zone()) BytecodeAnalysis(
+ bytecode_array, zone(), osr_bailout_id, analyze_liveness);
+ DCHECK_EQ(analysis->osr_bailout_id(), osr_bailout_id);
+ bytecode_analyses_[bytecode_array_data] = analysis;
+ return *analysis;
+}
+
+OffHeapBytecodeArray::OffHeapBytecodeArray(BytecodeArrayRef bytecode_array)
+ : array_(bytecode_array) {}
+
+int OffHeapBytecodeArray::length() const { return array_.length(); }
+
+int OffHeapBytecodeArray::parameter_count() const {
+ return array_.parameter_count();
+}
+
+uint8_t OffHeapBytecodeArray::get(int index) const { return array_.get(index); }
+
+void OffHeapBytecodeArray::set(int index, uint8_t value) { UNREACHABLE(); }
+
+Address OffHeapBytecodeArray::GetFirstBytecodeAddress() const {
+ return array_.GetFirstBytecodeAddress();
+}
+
+Handle<Object> OffHeapBytecodeArray::GetConstantAtIndex(
+ int index, Isolate* isolate) const {
+ return array_.GetConstantAtIndex(index);
+}
+
+bool OffHeapBytecodeArray::IsConstantAtIndexSmi(int index) const {
+ return array_.IsConstantAtIndexSmi(index);
+}
+
+Smi OffHeapBytecodeArray::GetConstantAtIndexAsSmi(int index) const {
+ return array_.GetConstantAtIndexAsSmi(index);
+}
+
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
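Most of the new *Ref accessors in this file share one bimodal shape: read straight from the heap object when the broker is disabled, otherwise answer from the serialized data and return base::nullopt when that data is missing. A stand-alone sketch of the shape (hypothetical names, with std::optional standing in for base::Optional; not part of the patch):

    #include <optional>

    enum class BrokerMode { kDisabled, kSerialized };

    struct Thing {       // hypothetical heap object
      int length = 0;
    };
    struct ThingData {   // hypothetical serialized side-table entry
      int cached_length = 0;
    };

    // Heap read when the broker is disabled; serialized data otherwise, with
    // std::nullopt signalling "not serialized".
    std::optional<int> ThingLength(BrokerMode mode, const Thing* heap_object,
                                   const ThingData* data) {
      if (mode == BrokerMode::kDisabled) return heap_object->length;
      if (data == nullptr) return std::nullopt;
      return data->cached_length;
    }
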
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 2c4cc766bc..ffc10d2b93 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -8,796 +8,24 @@
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
#include "src/common/globals.h"
+#include "src/compiler/access-info.h"
#include "src/compiler/refs-map.h"
#include "src/handles/handles.h"
+#include "src/interpreter/bytecode-array-accessor.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/function-kind.h"
-#include "src/objects/instance-type.h"
#include "src/objects/objects.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
-
-class BytecodeArray;
-class CallHandlerInfo;
-class FixedDoubleArray;
-class FunctionTemplateInfo;
-class HeapNumber;
-class InternalizedString;
-class JSBoundFunction;
-class JSDataView;
-class JSGlobalProxy;
-class JSRegExp;
-class JSTypedArray;
-class NativeContext;
-class ScriptContextTable;
-class VectorSlotPair;
-
namespace compiler {
-// Whether we are loading a property or storing to a property.
-// For a store during literal creation, do not walk up the prototype chain.
-enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
-
-enum class OddballType : uint8_t {
- kNone, // Not an Oddball.
- kBoolean, // True or False.
- kUndefined,
- kNull,
- kHole,
- kUninitialized,
- kOther // Oddball, but none of the above.
-};
-
-// This list is sorted such that subtypes appear before their supertypes.
-// DO NOT VIOLATE THIS PROPERTY!
-#define HEAP_BROKER_OBJECT_LIST(V) \
- /* Subtypes of JSObject */ \
- V(JSArray) \
- V(JSBoundFunction) \
- V(JSDataView) \
- V(JSFunction) \
- V(JSGlobalProxy) \
- V(JSRegExp) \
- V(JSTypedArray) \
- /* Subtypes of Context */ \
- V(NativeContext) \
- /* Subtypes of FixedArray */ \
- V(Context) \
- V(ScopeInfo) \
- V(ScriptContextTable) \
- /* Subtypes of FixedArrayBase */ \
- V(BytecodeArray) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- /* Subtypes of Name */ \
- V(InternalizedString) \
- V(String) \
- V(Symbol) \
- /* Subtypes of HeapObject */ \
- V(AllocationSite) \
- V(CallHandlerInfo) \
- V(Cell) \
- V(Code) \
- V(DescriptorArray) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArrayBase) \
- V(FunctionTemplateInfo) \
- V(HeapNumber) \
- V(JSObject) \
- V(Map) \
- V(Module) \
- V(MutableHeapNumber) \
- V(Name) \
- V(PropertyCell) \
- V(SharedFunctionInfo) \
- /* Subtypes of Object */ \
- V(HeapObject)
-
-class CompilationDependencies;
-class JSHeapBroker;
-class ObjectData;
-class PerIsolateCompilerCache;
-class PropertyAccessInfo;
-#define FORWARD_DECL(Name) class Name##Ref;
-HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
-#undef FORWARD_DECL
-
-class V8_EXPORT_PRIVATE ObjectRef {
- public:
- ObjectRef(JSHeapBroker* broker, Handle<Object> object);
- ObjectRef(JSHeapBroker* broker, ObjectData* data)
- : data_(data), broker_(broker) {
- CHECK_NOT_NULL(data_);
- }
-
- Handle<Object> object() const;
-
- bool equals(const ObjectRef& other) const;
-
- bool IsSmi() const;
- int AsSmi() const;
-
-#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
- HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
-#undef HEAP_IS_METHOD_DECL
-
-#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
- HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
-#undef HEAP_AS_METHOD_DECL
-
- bool IsNullOrUndefined() const;
-
- bool BooleanValue() const;
- Maybe<double> OddballToNumber() const;
-
- // Return the element at key {index} if {index} is known to be an own data
- // property of the object that is non-writable and non-configurable.
- base::Optional<ObjectRef> GetOwnConstantElement(uint32_t index,
- bool serialize = false) const;
-
- Isolate* isolate() const;
-
- protected:
- JSHeapBroker* broker() const;
- ObjectData* data() const;
- ObjectData* data_; // Should be used only by object() getters.
-
- private:
- friend class JSArrayData;
- friend class JSGlobalProxyRef;
- friend class JSGlobalProxyData;
- friend class JSObjectData;
- friend class StringData;
-
- friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
-
- JSHeapBroker* broker_;
-};
-
+class BytecodeAnalysis;
+class ObjectRef;
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
-// Temporary class that carries information from a Map. We'd like to remove
-// this class and use MapRef instead, but we can't as long as we support the
-// kDisabled broker mode. That's because obtaining the MapRef via
-// HeapObjectRef::map() requires a HandleScope when the broker is disabled.
-// During OptimizeGraph we generally don't have a HandleScope, however. There
-// are two places where we therefore use GetHeapObjectType() instead. Both that
-// function and this class should eventually be removed.
-class HeapObjectType {
- public:
- enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
-
- using Flags = base::Flags<Flag>;
-
- HeapObjectType(InstanceType instance_type, Flags flags,
- OddballType oddball_type)
- : instance_type_(instance_type),
- oddball_type_(oddball_type),
- flags_(flags) {
- DCHECK_EQ(instance_type == ODDBALL_TYPE,
- oddball_type != OddballType::kNone);
- }
-
- OddballType oddball_type() const { return oddball_type_; }
- InstanceType instance_type() const { return instance_type_; }
- Flags flags() const { return flags_; }
-
- bool is_callable() const { return flags_ & kCallable; }
- bool is_undetectable() const { return flags_ & kUndetectable; }
-
- private:
- InstanceType const instance_type_;
- OddballType const oddball_type_;
- Flags const flags_;
-};
-
-class HeapObjectRef : public ObjectRef {
- public:
- using ObjectRef::ObjectRef;
- Handle<HeapObject> object() const;
-
- MapRef map() const;
-
- // See the comment on the HeapObjectType class.
- HeapObjectType GetHeapObjectType() const;
-};
-
-class PropertyCellRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<PropertyCell> object() const;
-
- PropertyDetails property_details() const;
-
- void Serialize();
- ObjectRef value() const;
-};
-
-class JSObjectRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<JSObject> object() const;
-
- uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
- double RawFastDoublePropertyAt(FieldIndex index) const;
- ObjectRef RawFastPropertyAt(FieldIndex index) const;
-
- FixedArrayBaseRef elements() const;
- void EnsureElementsTenured();
- ElementsKind GetElementsKind() const;
-
- void SerializeObjectCreateMap();
- base::Optional<MapRef> GetObjectCreateMap() const;
-};
-
-class JSDataViewRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSDataView> object() const;
-
- size_t byte_length() const;
- size_t byte_offset() const;
-};
-
-class JSBoundFunctionRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSBoundFunction> object() const;
-
- void Serialize();
-
- // The following are available only after calling Serialize().
- ObjectRef bound_target_function() const;
- ObjectRef bound_this() const;
- FixedArrayRef bound_arguments() const;
-};
-
-class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSFunction> object() const;
-
- bool has_feedback_vector() const;
- bool has_initial_map() const;
- bool has_prototype() const;
- bool PrototypeRequiresRuntimeLookup() const;
-
- void Serialize();
- bool serialized() const;
-
- // The following are available only after calling Serialize().
- ObjectRef prototype() const;
- MapRef initial_map() const;
- ContextRef context() const;
- NativeContextRef native_context() const;
- SharedFunctionInfoRef shared() const;
- FeedbackVectorRef feedback_vector() const;
- int InitialMapInstanceSizeWithMinSlack() const;
-
- bool IsSerializedForCompilation() const;
-};
-
-class JSRegExpRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSRegExp> object() const;
-
- ObjectRef raw_properties_or_hash() const;
- ObjectRef data() const;
- ObjectRef source() const;
- ObjectRef flags() const;
- ObjectRef last_index() const;
-};
-
-class HeapNumberRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<HeapNumber> object() const;
-
- double value() const;
-};
-
-class MutableHeapNumberRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<MutableHeapNumber> object() const;
-
- double value() const;
-};
-
-class ContextRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Context> object() const;
-
- void SerializeContextChain();
- ContextRef previous() const;
-
- void SerializeSlot(int index);
- ObjectRef get(int index) const;
-};
-
-#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
- V(JSFunction, array_function) \
- V(JSFunction, boolean_function) \
- V(JSFunction, bigint_function) \
- V(JSFunction, number_function) \
- V(JSFunction, object_function) \
- V(JSFunction, promise_function) \
- V(JSFunction, promise_then) \
- V(JSFunction, string_function) \
- V(JSFunction, symbol_function) \
- V(JSGlobalProxy, global_proxy_object) \
- V(JSObject, promise_prototype) \
- V(Map, bound_function_with_constructor_map) \
- V(Map, bound_function_without_constructor_map) \
- V(Map, fast_aliased_arguments_map) \
- V(Map, initial_array_iterator_map) \
- V(Map, initial_string_iterator_map) \
- V(Map, iterator_result_map) \
- V(Map, js_array_holey_double_elements_map) \
- V(Map, js_array_holey_elements_map) \
- V(Map, js_array_holey_smi_elements_map) \
- V(Map, js_array_packed_double_elements_map) \
- V(Map, js_array_packed_elements_map) \
- V(Map, js_array_packed_smi_elements_map) \
- V(Map, sloppy_arguments_map) \
- V(Map, slow_object_with_null_prototype_map) \
- V(Map, strict_arguments_map) \
- V(ScriptContextTable, script_context_table) \
- V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \
- V(SharedFunctionInfo, promise_catch_finally_shared_fun) \
- V(SharedFunctionInfo, promise_then_finally_shared_fun) \
- V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun)
-
-// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have
-// happened when Turbofan is invoked via --always-opt.
-#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
- V(Map, async_function_object_map) \
- V(Map, map_key_iterator_map) \
- V(Map, map_key_value_iterator_map) \
- V(Map, map_value_iterator_map) \
- V(Map, set_key_value_iterator_map) \
- V(Map, set_value_iterator_map)
-
-#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
- BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
- BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V)
-
-class NativeContextRef : public ContextRef {
- public:
- using ContextRef::ContextRef;
- Handle<NativeContext> object() const;
-
- void Serialize();
-
-#define DECL_ACCESSOR(type, name) type##Ref name() const;
- BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
-#undef DECL_ACCESSOR
-
- MapRef GetFunctionMapFromIndex(int index) const;
- MapRef GetInitialJSArrayMap(ElementsKind kind) const;
- base::Optional<JSFunctionRef> GetConstructorFunction(const MapRef& map) const;
-};
-
-class NameRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Name> object() const;
-
- bool IsUniqueName() const;
-};
-
-class ScriptContextTableRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<ScriptContextTable> object() const;
-
- struct LookupResult {
- ContextRef context;
- bool immutable;
- int index;
- };
-
- base::Optional<LookupResult> lookup(const NameRef& name) const;
-};
-
-class DescriptorArrayRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<DescriptorArray> object() const;
-};
-
-class FeedbackCellRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FeedbackCell> object() const;
-
- HeapObjectRef value() const;
-};
-
-class FeedbackVectorRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FeedbackVector> object() const;
-
- ObjectRef get(FeedbackSlot slot) const;
-
- void SerializeSlots();
-};
-
-class FunctionTemplateInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FunctionTemplateInfo> object() const;
-
- void Serialize();
- ObjectRef call_code() const;
-};
-
-class CallHandlerInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<CallHandlerInfo> object() const;
-
- Address callback() const;
-
- void Serialize();
- ObjectRef data() const;
-};
-
-class AllocationSiteRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<AllocationSite> object() const;
-
- bool PointsToLiteral() const;
- AllocationType GetAllocationType() const;
- ObjectRef nested_site() const;
-
- // {IsFastLiteral} determines whether the given array or object literal
- // boilerplate satisfies all limits to be considered for fast deep-copying
- // and computes the total size of all objects that are part of the graph.
- //
- // If PointsToLiteral() is false, then IsFastLiteral() is also false.
- bool IsFastLiteral() const;
- // We only serialize boilerplate if IsFastLiteral is true.
- base::Optional<JSObjectRef> boilerplate() const;
-
- ElementsKind GetElementsKind() const;
- bool CanInlineCall() const;
-};
-
-class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Map> object() const;
-
- int instance_size() const;
- InstanceType instance_type() const;
- int GetInObjectProperties() const;
- int GetInObjectPropertiesStartInWords() const;
- int NumberOfOwnDescriptors() const;
- int GetInObjectPropertyOffset(int index) const;
- int constructor_function_index() const;
- int NextFreePropertyIndex() const;
- int UnusedPropertyFields() const;
- ElementsKind elements_kind() const;
- bool is_stable() const;
- bool is_extensible() const;
- bool is_constructor() const;
- bool has_prototype_slot() const;
- bool is_access_check_needed() const;
- bool is_deprecated() const;
- bool CanBeDeprecated() const;
- bool CanTransition() const;
- bool IsInobjectSlackTrackingInProgress() const;
- bool is_dictionary_map() const;
- bool IsFixedCowArrayMap() const;
- bool IsPrimitiveMap() const;
- bool is_undetectable() const;
- bool is_callable() const;
- bool has_indexed_interceptor() const;
- bool has_hidden_prototype() const;
- bool is_migration_target() const;
- bool supports_fast_array_iteration() const;
- bool supports_fast_array_resize() const;
- bool IsMapOfCurrentGlobalProxy() const;
-
- OddballType oddball_type() const;
-
-#define DEF_TESTER(Type, ...) bool Is##Type##Map() const;
- INSTANCE_TYPE_CHECKERS(DEF_TESTER)
-#undef DEF_TESTER
-
- void SerializeBackPointer();
- HeapObjectRef GetBackPointer() const;
-
- void SerializePrototype();
- bool serialized_prototype() const;
- HeapObjectRef prototype() const;
-
- void SerializeForElementLoad();
-
- void SerializeForElementStore();
- bool HasOnlyStablePrototypesWithFastElements(
- ZoneVector<MapRef>* prototype_maps);
-
- // Concerning the underlying instance_descriptors:
- void SerializeOwnDescriptors();
- void SerializeOwnDescriptor(int descriptor_index);
- MapRef FindFieldOwner(int descriptor_index) const;
- PropertyDetails GetPropertyDetails(int descriptor_index) const;
- NameRef GetPropertyKey(int descriptor_index) const;
- FieldIndex GetFieldIndexFor(int descriptor_index) const;
- ObjectRef GetFieldType(int descriptor_index) const;
- bool IsUnboxedDoubleField(int descriptor_index) const;
-
- // Available after calling JSFunctionRef::Serialize on a function that has
- // this map as initial map.
- ObjectRef GetConstructor() const;
- base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
-};
-
-class FixedArrayBaseRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FixedArrayBase> object() const;
-
- int length() const;
-};
-
-class FixedArrayRef : public FixedArrayBaseRef {
- public:
- using FixedArrayBaseRef::FixedArrayBaseRef;
- Handle<FixedArray> object() const;
-
- ObjectRef get(int i) const;
-};
-
-class FixedDoubleArrayRef : public FixedArrayBaseRef {
- public:
- using FixedArrayBaseRef::FixedArrayBaseRef;
- Handle<FixedDoubleArray> object() const;
-
- double get_scalar(int i) const;
- bool is_the_hole(int i) const;
-};
-
-class BytecodeArrayRef : public FixedArrayBaseRef {
- public:
- using FixedArrayBaseRef::FixedArrayBaseRef;
- Handle<BytecodeArray> object() const;
-
- int register_count() const;
- int parameter_count() const;
- interpreter::Register incoming_new_target_or_generator_register() const;
-};
-
-class JSArrayRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSArray> object() const;
-
- ObjectRef length() const;
-
- // Return the element at key {index} if the array has a copy-on-write elements
- // storage and {index} is known to be an own data property.
- base::Optional<ObjectRef> GetOwnCowElement(uint32_t index,
- bool serialize = false) const;
-};
-
-class ScopeInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<ScopeInfo> object() const;
-
- int ContextLength() const;
-};
-
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(bool, is_safe_to_skip_arguments_adaptor) \
- V(bool, IsInlineable) \
- V(bool, is_compiled)
-
-class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<SharedFunctionInfo> object() const;
-
- int builtin_id() const;
- BytecodeArrayRef GetBytecodeArray() const;
-
-#define DECL_ACCESSOR(type, name) type name() const;
- BROKER_SFI_FIELDS(DECL_ACCESSOR)
-#undef DECL_ACCESSOR
-
- bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
- void SetSerializedForCompilation(FeedbackVectorRef feedback);
-};
-
-class StringRef : public NameRef {
- public:
- using NameRef::NameRef;
- Handle<String> object() const;
-
- int length() const;
- uint16_t GetFirstChar();
- base::Optional<double> ToNumber();
- bool IsSeqString() const;
- bool IsExternalString() const;
-};
-
-class SymbolRef : public NameRef {
- public:
- using NameRef::NameRef;
- Handle<Symbol> object() const;
-};
-
-class JSTypedArrayRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSTypedArray> object() const;
-
- bool is_on_heap() const;
- size_t length() const;
- void* external_pointer() const;
-
- void Serialize();
- bool serialized() const;
-
- HeapObjectRef buffer() const;
-};
-
-class ModuleRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Module> object() const;
-
- void Serialize();
-
- CellRef GetCell(int cell_index) const;
-};
-
-class CellRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Cell> object() const;
-
- ObjectRef value() const;
-};
-
-class JSGlobalProxyRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSGlobalProxy> object() const;
-
- // If {serialize} is false:
- // If the property is known to exist as a property cell (on the global
- // object), return that property cell. Otherwise (not known to exist as a
- // property cell or known not to exist as a property cell) return nothing.
- // If {serialize} is true:
- // Like above but potentially access the heap and serialize the necessary
- // information.
- base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name,
- bool serialize = false) const;
-};
-
-class CodeRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Code> object() const;
-};
-
-class InternalizedStringRef : public StringRef {
- public:
- using StringRef::StringRef;
- Handle<InternalizedString> object() const;
-};
-
-class ElementAccessFeedback;
-class NamedAccessFeedback;
-
-class ProcessedFeedback : public ZoneObject {
- public:
- enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess };
- Kind kind() const { return kind_; }
-
- ElementAccessFeedback const* AsElementAccess() const;
- NamedAccessFeedback const* AsNamedAccess() const;
-
- protected:
- explicit ProcessedFeedback(Kind kind) : kind_(kind) {}
-
- private:
- Kind const kind_;
-};
-
-class InsufficientFeedback final : public ProcessedFeedback {
- public:
- InsufficientFeedback();
-};
-
-class GlobalAccessFeedback : public ProcessedFeedback {
- public:
- explicit GlobalAccessFeedback(PropertyCellRef cell);
- GlobalAccessFeedback(ContextRef script_context, int slot_index,
- bool immutable);
-
- bool IsPropertyCell() const;
- PropertyCellRef property_cell() const;
-
- bool IsScriptContextSlot() const { return !IsPropertyCell(); }
- ContextRef script_context() const;
- int slot_index() const;
- bool immutable() const;
-
- base::Optional<ObjectRef> GetConstantHint() const;
-
- private:
- ObjectRef const cell_or_context_;
- int const index_and_immutable_;
-};
-
-class ElementAccessFeedback : public ProcessedFeedback {
- public:
- explicit ElementAccessFeedback(Zone* zone);
-
- // No transition sources appear in {receiver_maps}.
- // All transition targets appear in {receiver_maps}.
- ZoneVector<Handle<Map>> receiver_maps;
- ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions;
-
- class MapIterator {
- public:
- bool done() const;
- void advance();
- MapRef current() const;
-
- private:
- friend class ElementAccessFeedback;
-
- explicit MapIterator(ElementAccessFeedback const& processed,
- JSHeapBroker* broker);
-
- ElementAccessFeedback const& processed_;
- JSHeapBroker* const broker_;
- size_t index_ = 0;
- };
-
- // Iterator over all maps: first {receiver_maps}, then transition sources.
- MapIterator all_maps(JSHeapBroker* broker) const;
-};
-
-class NamedAccessFeedback : public ProcessedFeedback {
- public:
- NamedAccessFeedback(NameRef const& name,
- ZoneVector<PropertyAccessInfo> const& access_infos);
-
- NameRef const& name() const { return name_; }
- ZoneVector<PropertyAccessInfo> const& access_infos() const {
- return access_infos_;
- }
-
- private:
- NameRef const name_;
- ZoneVector<PropertyAccessInfo> const access_infos_;
-};
-
struct FeedbackSource {
FeedbackSource(Handle<FeedbackVector> vector_, FeedbackSlot slot_)
: vector(vector_), slot(slot_) {}
@@ -821,26 +49,28 @@ struct FeedbackSource {
};
};
-#define TRACE_BROKER(broker, x) \
- do { \
- if (FLAG_trace_heap_broker_verbose) broker->Trace() << x << '\n'; \
+#define TRACE_BROKER(broker, x) \
+ do { \
+ if (broker->tracing_enabled() && FLAG_trace_heap_broker_verbose) \
+ broker->Trace() << x << '\n'; \
} while (false)
#define TRACE_BROKER_MISSING(broker, x) \
do { \
- if (FLAG_trace_heap_broker) \
+ if (broker->tracing_enabled()) \
broker->Trace() << __FUNCTION__ << ": missing " << x << '\n'; \
} while (false)
class V8_EXPORT_PRIVATE JSHeapBroker {
public:
- JSHeapBroker(Isolate* isolate, Zone* broker_zone);
+ JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled);
void SetNativeContextRef();
void SerializeStandardObjects();
Isolate* isolate() const { return isolate_; }
Zone* zone() const { return current_zone_; }
+ bool tracing_enabled() const { return tracing_enabled_; }
NativeContextRef native_context() const { return native_context_.value(); }
PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
@@ -875,12 +105,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// TODO(neis): Move these into serializer when we're always in the background.
ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
- MapHandles const& maps);
+ MapHandles const& maps, KeyedAccessMode const& keyed_mode);
GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(
FeedbackSource const& source);
+ BytecodeAnalysis const& GetBytecodeAnalysis(
+ Handle<BytecodeArray> bytecode_array, BailoutId osr_offset,
+ bool analyze_liveness, bool serialize);
+
base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus);
+ // If there is no result stored for {map}, we return an Invalid
+ // PropertyAccessInfo.
+ PropertyAccessInfo GetAccessInfoForLoadingThen(MapRef map);
+ void CreateAccessInfoForLoadingThen(MapRef map,
+ CompilationDependencies* dependencies);
+ PropertyAccessInfo GetAccessInfoForLoadingExec(MapRef map);
+ PropertyAccessInfo const& CreateAccessInfoForLoadingExec(
+ MapRef map, CompilationDependencies* dependencies);
+
std::ostream& Trace();
void IncrementTracingIndentation();
void DecrementTracingIndentation();
@@ -902,12 +145,19 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Handle<JSObject>::equal_to>
array_and_object_prototypes_;
BrokerMode mode_ = kDisabled;
+ bool const tracing_enabled_;
StdoutStream trace_out_;
unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
FeedbackSource::Hash, FeedbackSource::Equal>
feedback_;
+ ZoneUnorderedMap<ObjectData*, BytecodeAnalysis*> bytecode_analyses_;
+ typedef ZoneUnorderedMap<MapRef, PropertyAccessInfo, ObjectRef::Hash,
+ ObjectRef::Equal>
+ MapToAccessInfos;
+ MapToAccessInfos ais_for_loading_then_;
+ MapToAccessInfos ais_for_loading_exec_;
static const size_t kMinimalRefsBucketCount = 8; // must be power of 2
static const size_t kInitialRefsBucketCount = 1024; // must be power of 2
@@ -948,6 +198,23 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
// compilation is finished.
bool CanInlineElementAccess(MapRef const& map);
+class OffHeapBytecodeArray final : public interpreter::AbstractBytecodeArray {
+ public:
+ explicit OffHeapBytecodeArray(BytecodeArrayRef bytecode_array);
+
+ int length() const override;
+ int parameter_count() const override;
+ uint8_t get(int index) const override;
+ void set(int index, uint8_t value) override;
+ Address GetFirstBytecodeAddress() const override;
+ Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const override;
+ bool IsConstantAtIndexSmi(int index) const override;
+ Smi GetConstantAtIndexAsSmi(int index) const override;
+
+ private:
+ BytecodeArrayRef array_;
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
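The js-heap-broker.h hunks above add a per-broker tracing_enabled() switch and make the TRACE_BROKER macros test it before emitting anything. A minimal standalone sketch of that guarded-macro pattern follows; the Broker class and FLAG_trace_broker_verbose name here are illustrative stand-ins, not the V8 types.

#include <iostream>

// Hypothetical stand-ins for the flag and broker used in the diff above.
static bool FLAG_trace_broker_verbose = true;

class Broker {
 public:
  explicit Broker(bool tracing_enabled) : tracing_enabled_(tracing_enabled) {}
  bool tracing_enabled() const { return tracing_enabled_; }
  std::ostream& Trace() { return std::cerr; }

 private:
  bool const tracing_enabled_;
};

// The do { ... } while (false) wrapper keeps the macro usable as a single
// statement, and the cheap boolean test makes disabled tracing nearly free.
#define TRACE_BROKER(broker, x)                                   \
  do {                                                            \
    if ((broker)->tracing_enabled() && FLAG_trace_broker_verbose) \
      (broker)->Trace() << x << '\n';                             \
  } while (false)

int main() {
  Broker broker(/*tracing_enabled=*/true);
  TRACE_BROKER(&broker, "processing map " << 42);
  return 0;
}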
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index cc48ae80cb..7e7c9e3a0e 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -30,8 +30,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
ObjectRef object(broker(), HeapConstantOf(node->op()));
if (object.IsJSFunction()) object.AsJSFunction().Serialize();
if (object.IsJSObject()) object.AsJSObject().SerializeObjectCreateMap();
- if (object.IsModule()) object.AsModule().Serialize();
- if (object.IsContext()) object.AsContext().SerializeContextChain();
+ if (object.IsSourceTextModule()) object.AsSourceTextModule().Serialize();
break;
}
case IrOpcode::kJSCreateArray: {
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index f78635b139..e11d6b59a3 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -7,6 +7,7 @@
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects/objects-inl.h"
@@ -21,15 +22,9 @@ namespace compiler {
} while (false)
namespace {
-
-bool IsSmallInlineFunction(BytecodeArrayRef bytecode) {
- // Forcibly inline small functions.
- if (bytecode.length() <= FLAG_max_inlined_bytecode_size_small) {
- return true;
- }
- return false;
+bool IsSmall(BytecodeArrayRef bytecode) {
+ return bytecode.length() <= FLAG_max_inlined_bytecode_size_small;
}
-
} // namespace
JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
@@ -65,7 +60,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
out.functions[n] = m.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[n].value();
if (function.IsSerializedForCompilation()) {
- out.bytecode[n] = function.shared().GetBytecodeArray(), isolate();
+ out.bytecode[n] = function.shared().GetBytecodeArray();
}
}
out.num_functions = value_input_count;
@@ -91,6 +86,11 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+ if (total_inlined_bytecode_size_ >= FLAG_max_inlined_bytecode_size_absolute &&
+ mode_ != kStressInlining) {
+ return NoChange();
+ }
+
// Check if we already saw that {node} before, and if so, just skip it.
if (seen_.find(node->id()) != seen_.end()) return NoChange();
seen_.insert(node->id());
@@ -107,7 +107,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
return NoChange();
}
- bool can_inline = false, force_inline_small = true;
+ bool can_inline_candidate = false, candidate_is_small = true;
candidate.total_size = 0;
Node* frame_state = NodeProperties::GetFrameStateInput(node);
FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
@@ -155,15 +155,12 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// serialized.
BytecodeArrayRef bytecode = candidate.bytecode[i].value();
if (candidate.can_inline_function[i]) {
- can_inline = true;
+ can_inline_candidate = true;
candidate.total_size += bytecode.length();
}
- // We don't force inline small functions if any of them is not inlineable.
- if (!IsSmallInlineFunction(bytecode)) {
- force_inline_small = false;
- }
+ candidate_is_small = candidate_is_small && IsSmall(bytecode);
}
- if (!can_inline) return NoChange();
+ if (!can_inline_candidate) return NoChange();
// Gather feedback on how often this call site has been hit before.
if (node->opcode() == IrOpcode::kJSCall) {
@@ -195,9 +192,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
}
// Forcibly inline small functions here. In the case of polymorphic inlining
- // force_inline_small is set only when all functions are small.
- if (force_inline_small &&
- cumulative_count_ < FLAG_max_inlined_bytecode_size_absolute) {
+ // candidate_is_small is set only when all functions are small.
+ if (candidate_is_small) {
TRACE("Inlining small function(s) at call site #%d:%s\n", node->id(),
node->op()->mnemonic());
return InlineCandidate(candidate, true);
@@ -221,21 +217,24 @@ void JSInliningHeuristic::Finalize() {
Candidate candidate = *i;
candidates_.erase(i);
+ // Make sure we don't try to inline dead candidate nodes.
+ if (candidate.node->IsDead()) {
+ continue;
+ }
+
// Make sure we have some extra budget left, so that any small functions
// exposed by this function would be given a chance to inline.
double size_of_candidate =
candidate.total_size * FLAG_reserve_inline_budget_scale_factor;
- int total_size = cumulative_count_ + static_cast<int>(size_of_candidate);
+ int total_size =
+ total_inlined_bytecode_size_ + static_cast<int>(size_of_candidate);
if (total_size > FLAG_max_inlined_bytecode_size_cumulative) {
// Try if any smaller functions are available to inline.
continue;
}
- // Make sure we don't try to inline dead candidate nodes.
- if (!candidate.node->IsDead()) {
- Reduction const reduction = InlineCandidate(candidate, false);
- if (reduction.Changed()) return;
- }
+ Reduction const reduction = InlineCandidate(candidate, false);
+ if (reduction.Changed()) return;
}
}
@@ -630,7 +629,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
if (num_calls == 1) {
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
- cumulative_count_ += candidate.bytecode[0].value().length();
+ total_inlined_bytecode_size_ += candidate.bytecode[0].value().length();
}
return reduction;
}
@@ -688,20 +687,19 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
ReplaceWithValue(node, value, effect, control);
// Inline the individual, cloned call sites.
- for (int i = 0; i < num_calls; ++i) {
- Node* node = calls[i];
+ for (int i = 0; i < num_calls && total_inlined_bytecode_size_ <
+ FLAG_max_inlined_bytecode_size_absolute;
+ ++i) {
if (candidate.can_inline_function[i] &&
- (small_function ||
- cumulative_count_ < FLAG_max_inlined_bytecode_size_cumulative)) {
+ (small_function || total_inlined_bytecode_size_ <
+ FLAG_max_inlined_bytecode_size_cumulative)) {
+ Node* node = calls[i];
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
+ total_inlined_bytecode_size_ += candidate.bytecode[i]->length();
// Killing the call node is not strictly necessary, but it is safer to
// make sure we do not resurrect the node.
node->Kill();
- // Small functions don't count towards the budget.
- if (!small_function) {
- cumulative_count_ += candidate.bytecode[i]->length();
- }
}
}
}
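The js-inlining-heuristic.cc hunks above rename cumulative_count_ to total_inlined_bytecode_size_, charge small functions against the budget as well, and make Reduce() bail out once an absolute limit is reached. The sketch below shows that kind of cumulative bytecode-size budget in isolation; the constants and class names are hypothetical placeholders for the FLAG_max_inlined_bytecode_size_* flags and the heuristic's bookkeeping, not the V8 API.

#include <cstdio>
#include <vector>

// Illustrative budget limits standing in for the V8 flags in the diff.
constexpr int kMaxInlinedBytecodeSizeAbsolute = 5120;
constexpr int kMaxInlinedBytecodeSizeCumulative = 1024;

struct Candidate {
  int bytecode_length;
  bool can_inline;
};

class InliningBudget {
 public:
  // Returns true if the candidate fits the remaining budget and was charged.
  bool TryCharge(const Candidate& c) {
    if (!c.can_inline) return false;
    // Hard stop once the absolute budget is exhausted (the early NoChange()
    // return added to Reduce() plays this role in the diff).
    if (total_inlined_bytecode_size_ >= kMaxInlinedBytecodeSizeAbsolute)
      return false;
    // Per-graph cumulative budget, analogous to the cumulative flag.
    if (total_inlined_bytecode_size_ + c.bytecode_length >
        kMaxInlinedBytecodeSizeCumulative) {
      return false;
    }
    total_inlined_bytecode_size_ += c.bytecode_length;
    return true;
  }

  int total() const { return total_inlined_bytecode_size_; }

 private:
  int total_inlined_bytecode_size_ = 0;
};

int main() {
  InliningBudget budget;
  std::vector<Candidate> candidates = {{300, true}, {900, true}, {40, false}};
  for (const Candidate& c : candidates) {
    std::printf("length %d -> %s\n", c.bytecode_length,
                budget.TryCharge(c) ? "inlined" : "skipped");
  }
  std::printf("total inlined bytecode: %d\n", budget.total());
  return 0;
}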
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 99ad258c31..b143e9b67f 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -97,7 +97,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
SourcePositionTable* source_positions_;
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
- int cumulative_count_ = 0;
+ int total_inlined_bytecode_size_ = 0;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index e43e710da7..91cbea2346 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -7,11 +7,13 @@
#include "src/ast/ast.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -466,14 +468,13 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
AllowHandleAllocation allow_handle_alloc;
AllowHeapAllocation allow_heap_alloc;
AllowCodeDependencyChange allow_code_dep_change;
- Handle<Context> native_context =
- handle(info_->native_context(), isolate());
-
- BuildGraphFromBytecode(broker(), zone(), bytecode_array.object(),
- shared_info.value().object(),
- feedback_vector.object(), BailoutId::None(),
- jsgraph(), call.frequency(), source_positions_,
- native_context, inlining_id, flags);
+ CallFrequency frequency = call.frequency();
+ Handle<NativeContext> native_context(info_->native_context(), isolate());
+ BuildGraphFromBytecode(
+ broker(), zone(), bytecode_array.object(),
+ shared_info.value().object(), feedback_vector.object(),
+ BailoutId::None(), jsgraph(), frequency, source_positions_,
+ native_context, inlining_id, flags, &info_->tick_counter());
}
// Extract the inlinee start/end nodes.
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 94a9e71b2e..f50f7b591d 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -59,7 +59,8 @@ class JSInliner final : public AdvancedReducer {
SourcePositionTable* const source_positions_;
base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
- FeedbackVectorRef DetermineCallContext(Node* node, Node*& context_out);
+ FeedbackVectorRef DetermineCallContext(
+ Node* node, Node*& context_out); // NOLINT(runtime/references)
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count, BailoutId bailout_id,
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 312ab38f51..7d742a5f32 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -33,12 +33,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-// This is needed for gc_mole which will compile this file without the full set
-// of GN defined macros.
-#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64
-#endif
-
namespace {
bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps) {
@@ -513,8 +507,8 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
Node* receiver, Node* effect, Handle<HeapObject> prototype) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
+ NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
// Try to determine either that all of the {receiver_maps} have the given
@@ -686,6 +680,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
// ES section #sec-promise-resolve-functions
Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSResolvePromise, node->opcode());
Node* promise = NodeProperties::GetValueInput(node, 0);
Node* resolution = NodeProperties::GetValueInput(node, 1);
@@ -702,9 +697,17 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
- access_info_factory.ComputePropertyAccessInfos(
- resolution_maps, factory()->then_string(), AccessMode::kLoad,
- &access_infos);
+ if (!FLAG_concurrent_inlining) {
+ access_info_factory.ComputePropertyAccessInfos(
+ resolution_maps, factory()->then_string(), AccessMode::kLoad,
+ &access_infos);
+ } else {
+ // Obtain pre-computed access infos from the broker.
+ for (auto map : resolution_maps) {
+ MapRef map_ref(broker(), map);
+ access_infos.push_back(broker()->GetAccessInfoForLoadingThen(map_ref));
+ }
+ }
PropertyAccessInfo access_info =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
AccessMode::kLoad);
@@ -975,9 +978,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
}
Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
-
+ DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
LoadGlobalParameters const& p = LoadGlobalParametersOf(node->op());
if (!p.feedback().IsValid()) return NoChange();
FeedbackSource source(p.feedback());
@@ -1007,9 +1009,8 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
}
Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
- DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
-
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
StoreGlobalParameters const& p = StoreGlobalParametersOf(node->op());
@@ -1298,7 +1299,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
- Node* node, Node* value, FeedbackNexus const& nexus, NameRef const& name,
+ Node* node, Node* value, FeedbackSource const& source, NameRef const& name,
AccessMode access_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
@@ -1312,11 +1313,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
}
- return ReducePropertyAccessUsingProcessedFeedback(node, nullptr, name, value,
- nexus, access_mode);
+ return ReducePropertyAccess(node, nullptr, name, value, source, access_mode);
}
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
Node* const receiver = NodeProperties::GetValueInput(node, 0);
@@ -1355,56 +1356,47 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
}
}
- // Extract receiver maps from the load IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(), nexus, name,
+ return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(),
+ FeedbackSource(p.feedback()), name,
AccessMode::kLoad);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccessFromNexus(
- node, value, nexus, NameRef(broker(), p.name()), AccessMode::kStore);
+ return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()),
+ NameRef(broker(), p.name()),
+ AccessMode::kStore);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode());
StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Try to lower the creation of a named property based on the {receiver_maps}.
- return ReduceNamedAccessFromNexus(node, value, nexus,
+ return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()),
NameRef(broker(), p.name()),
AccessMode::kStoreInLiteral);
}
Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
- Node* node, Node* index, Node* value, AccessMode access_mode,
- KeyedAccessLoadMode load_mode) {
+ Node* node, Node* index, Node* value, KeyedAccessMode const& keyed_mode) {
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Strings are immutable in JavaScript.
- if (access_mode == AccessMode::kStore) return NoChange();
+ if (keyed_mode.access_mode() == AccessMode::kStore) return NoChange();
// `in` cannot be used on strings.
- if (access_mode == AccessMode::kHas) return NoChange();
+ if (keyed_mode.access_mode() == AccessMode::kHas) return NoChange();
// Ensure that the {receiver} is actually a String.
receiver = effect = graph()->NewNode(
@@ -1416,7 +1408,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
// Load the single character string from {receiver} or yield undefined
// if the {index} is out of bounds (depending on the {load_mode}).
value = BuildIndexedStringLoad(receiver, index, length, &effect, &control,
- load_mode);
+ keyed_mode.load_mode());
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -1437,24 +1429,31 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* node, Node* index, Node* value,
- ElementAccessFeedback const& processed, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
+ ElementAccessFeedback const& processed) {
DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
-
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty ||
node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
node->opcode() == IrOpcode::kJSHasProperty);
+
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state =
NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
+ AccessMode access_mode = processed.keyed_mode.access_mode();
+ if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) &&
+ receiver->opcode() == IrOpcode::kHeapConstant) {
+ Reduction reduction = ReduceKeyedLoadFromHeapConstant(
+ node, index, access_mode, processed.keyed_mode.load_mode());
+ if (reduction.Changed()) return reduction;
+ }
+
if (HasOnlyStringMaps(broker(), processed.receiver_maps)) {
DCHECK(processed.transitions.empty());
- return ReduceElementAccessOnString(node, index, value, access_mode,
- load_mode);
+ return ReduceElementAccessOnString(node, index, value,
+ processed.keyed_mode);
}
// Compute element access infos for the receiver maps.
@@ -1485,7 +1484,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// then we need to check that all prototypes have stable maps with
// fast elements (and we need to guard against changes to that below).
if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) ||
- IsGrowStoreMode(store_mode)) &&
+ IsGrowStoreMode(processed.keyed_mode.store_mode())) &&
!receiver_map.HasOnlyStablePrototypesWithFastElements(
&prototype_maps)) {
return NoChange();
@@ -1558,7 +1557,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Access the actual element.
ValueEffectControl continuation =
BuildElementAccess(receiver, index, value, effect, control, access_info,
- access_mode, load_mode, store_mode);
+ processed.keyed_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -1591,7 +1590,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
? ElementsTransition::kFastTransition
: ElementsTransition::kSlowTransition,
transition_source.object(), transition_target.object())),
- receiver, effect, control);
+ receiver, this_effect, this_control);
}
// Perform map check(s) on {receiver}.
@@ -1623,9 +1622,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Access the actual element.
- ValueEffectControl continuation = BuildElementAccess(
- this_receiver, this_index, this_value, this_effect, this_control,
- access_info, access_mode, load_mode, store_mode);
+ ValueEffectControl continuation =
+ BuildElementAccess(this_receiver, this_index, this_value, this_effect,
+ this_control, access_info, processed.keyed_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -1659,7 +1658,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant(
- Node* node, Node* key, FeedbackNexus const& nexus, AccessMode access_mode,
+ Node* node, Node* key, AccessMode access_mode,
KeyedAccessLoadMode load_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSHasProperty);
@@ -1715,54 +1714,24 @@ Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant(
// accesses using the known length, which doesn't change.
if (receiver_ref.IsString()) {
DCHECK_NE(access_mode, AccessMode::kHas);
- // We can only assume that the {index} is a valid array index if the
- // IC is in element access mode and not MEGAMORPHIC, otherwise there's
- // no guard for the bounds check below.
- if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
- // Ensure that {key} is less than {receiver} length.
- Node* length = jsgraph()->Constant(receiver_ref.AsString().length());
-
- // Load the single character string from {receiver} or yield
- // undefined if the {key} is out of bounds (depending on the
- // {load_mode}).
- Node* value = BuildIndexedStringLoad(receiver, key, length, &effect,
- &control, load_mode);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
+ // Ensure that {key} is less than {receiver} length.
+ Node* length = jsgraph()->Constant(receiver_ref.AsString().length());
- return NoChange();
-}
-
-Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
- Node* node, Node* key, Node* value, FeedbackNexus const& nexus,
- AccessMode access_mode, KeyedAccessLoadMode load_mode,
- KeyedAccessStoreMode store_mode) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
- node->opcode() == IrOpcode::kJSStoreProperty ||
- node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
- node->opcode() == IrOpcode::kJSHasProperty);
-
- Node* receiver = NodeProperties::GetValueInput(node, 0);
-
- if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) &&
- receiver->opcode() == IrOpcode::kHeapConstant) {
- Reduction reduction = ReduceKeyedLoadFromHeapConstant(
- node, key, nexus, access_mode, load_mode);
- if (reduction.Changed()) return reduction;
+ // Load the single character string from {receiver} or yield
+ // undefined if the {key} is out of bounds (depending on the
+ // {load_mode}).
+ Node* value = BuildIndexedStringLoad(receiver, key, length, &effect,
+ &control, load_mode);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
- return ReducePropertyAccessUsingProcessedFeedback(node, key, base::nullopt,
- value, nexus, access_mode,
- load_mode, store_mode);
+ return NoChange();
}
-Reduction
-JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
+Reduction JSNativeContextSpecialization::ReducePropertyAccess(
Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
- FeedbackNexus const& nexus, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
+ FeedbackSource const& source, AccessMode access_mode) {
DCHECK_EQ(key == nullptr, static_name.has_value());
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty ||
@@ -1777,11 +1746,12 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
ProcessedFeedback const* processed = nullptr;
if (FLAG_concurrent_inlining) {
- processed = broker()->GetFeedback(FeedbackSource(nexus));
+ processed = broker()->GetFeedback(source);
// TODO(neis): Infer maps from the graph and consolidate with feedback/hints
// and filter impossible candidates based on inferred root map.
} else {
// TODO(neis): Try to unify this with the similar code in the serializer.
+ FeedbackNexus nexus(source.vector, source.slot);
if (nexus.ic_state() == UNINITIALIZED) {
processed = new (zone()) InsufficientFeedback();
} else {
@@ -1801,8 +1771,8 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
processed = new (zone()) NamedAccessFeedback(*name, access_infos);
} else if (nexus.GetKeyType() == ELEMENT &&
MEGAMORPHIC != nexus.ic_state()) {
- processed =
- broker()->ProcessFeedbackMapsForElementAccess(receiver_maps);
+ processed = broker()->ProcessFeedbackMapsForElementAccess(
+ receiver_maps, KeyedAccessMode::FromNexus(nexus));
}
}
}
@@ -1818,9 +1788,10 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
return ReduceNamedAccess(node, value, *processed->AsNamedAccess(),
access_mode, key);
case ProcessedFeedback::kElementAccess:
+ CHECK_EQ(processed->AsElementAccess()->keyed_mode.access_mode(),
+ access_mode);
return ReduceElementAccess(node, key, value,
- *processed->AsElementAccess(), access_mode,
- load_mode, store_mode);
+ *processed->AsElementAccess());
case ProcessedFeedback::kGlobalAccess:
UNREACHABLE();
}
@@ -1846,21 +1817,15 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
}
Reduction JSNativeContextSpecialization::ReduceJSHasProperty(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSHasProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* key = NodeProperties::GetValueInput(node, 1);
Node* value = jsgraph()->Dead();
- // Extract receiver maps from the has property IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access load mode from the keyed load IC.
- KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode();
-
- // Try to lower the keyed access based on the {nexus}.
- return ReduceKeyedAccess(node, key, value, nexus, AccessMode::kHas, load_mode,
- STANDARD_STORE);
+ return ReducePropertyAccess(node, key, base::nullopt, value,
+ FeedbackSource(p.feedback()), AccessMode::kHas);
}
Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
@@ -1970,6 +1935,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
}
Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* name = NodeProperties::GetValueInput(node, 1);
@@ -1979,62 +1945,49 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
if (reduction.Changed()) return reduction;
}
- // Extract receiver maps from the keyed load IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access load mode from the keyed load IC.
- KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode();
-
- // Try to lower the keyed access based on the {nexus}.
Node* value = jsgraph()->Dead();
- return ReduceKeyedAccess(node, name, value, nexus, AccessMode::kLoad,
- load_mode, STANDARD_STORE);
+ return ReducePropertyAccess(node, name, base::nullopt, value,
+ FeedbackSource(p.feedback()), AccessMode::kLoad);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* const key = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
- // Extract receiver maps from the keyed store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access store mode from the keyed store IC.
- KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
-
- // Try to lower the keyed access based on the {nexus}.
- return ReduceKeyedAccess(node, key, value, nexus, AccessMode::kStore,
- STANDARD_LOAD, store_mode);
+ return ReducePropertyAccess(node, key, base::nullopt, value,
+ FeedbackSource(p.feedback()), AccessMode::kStore);
}
Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
Node* receiver, Node* context, Node* frame_state, Node** effect,
Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
- Node* target = jsgraph()->Constant(access_info.constant());
+ ObjectRef constant(broker(), access_info.constant());
+ Node* target = jsgraph()->Constant(constant);
FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
// Introduce the call to the getter function.
Node* value;
- ObjectRef constant(broker(), access_info.constant());
if (constant.IsJSFunction()) {
value = *effect = *control = graph()->NewNode(
jsgraph()->javascript()->Call(2, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, context, frame_state, *effect, *control);
} else {
- auto function_template_info = constant.AsFunctionTemplateInfo();
- function_template_info.Serialize();
- Node* holder =
- access_info.holder().is_null()
- ? receiver
- : jsgraph()->Constant(access_info.holder().ToHandleChecked());
+ Node* holder = access_info.holder().is_null()
+ ? receiver
+ : jsgraph()->Constant(ObjectRef(
+ broker(), access_info.holder().ToHandleChecked()));
SharedFunctionInfoRef shared_info(
broker(), frame_info.shared_info().ToHandleChecked());
- value = InlineApiCall(receiver, holder, frame_state, nullptr, effect,
- control, shared_info, function_template_info);
+
+ value =
+ InlineApiCall(receiver, holder, frame_state, nullptr, effect, control,
+ shared_info, constant.AsFunctionTemplateInfo());
}
// Remember to rewire the IfException edge if this is inside a try-block.
if (if_exceptions != nullptr) {
@@ -2052,26 +2005,24 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node* receiver, Node* value, Node* context, Node* frame_state,
Node** effect, Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
- Node* target = jsgraph()->Constant(access_info.constant());
+ ObjectRef constant(broker(), access_info.constant());
+ Node* target = jsgraph()->Constant(constant);
FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
// Introduce the call to the setter function.
- ObjectRef constant(broker(), access_info.constant());
if (constant.IsJSFunction()) {
*effect = *control = graph()->NewNode(
jsgraph()->javascript()->Call(3, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, value, context, frame_state, *effect, *control);
} else {
- auto function_template_info = constant.AsFunctionTemplateInfo();
- function_template_info.Serialize();
- Node* holder =
- access_info.holder().is_null()
- ? receiver
- : jsgraph()->Constant(access_info.holder().ToHandleChecked());
+ Node* holder = access_info.holder().is_null()
+ ? receiver
+ : jsgraph()->Constant(ObjectRef(
+ broker(), access_info.holder().ToHandleChecked()));
SharedFunctionInfoRef shared_info(
broker(), frame_info.shared_info().ToHandleChecked());
InlineApiCall(receiver, holder, frame_state, value, effect, control,
- shared_info, function_template_info);
+ shared_info, constant.AsFunctionTemplateInfo());
}
// Remember to rewire the IfException edge if this is inside a try-block.
if (if_exceptions != nullptr) {
@@ -2088,8 +2039,16 @@ Node* JSNativeContextSpecialization::InlineApiCall(
Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect,
Node** control, SharedFunctionInfoRef const& shared_info,
FunctionTemplateInfoRef const& function_template_info) {
- auto call_handler_info =
- function_template_info.call_code().AsCallHandlerInfo();
+ if (!function_template_info.has_call_code()) {
+ return nullptr;
+ }
+
+ if (!function_template_info.call_code().has_value()) {
+ TRACE_BROKER_MISSING(broker(), "call code for function template info "
+ << function_template_info);
+ return nullptr;
+ }
+ CallHandlerInfoRef call_handler_info = *function_template_info.call_code();
// Only setters have a value.
int const argc = value == nullptr ? 0 : 1;
@@ -2151,7 +2110,8 @@ JSNativeContextSpecialization::BuildPropertyLoad(
value = InlinePropertyGetterCall(receiver, context, frame_state, &effect,
&control, if_exceptions, access_info);
} else if (access_info.IsModuleExport()) {
- Node* cell = jsgraph()->Constant(access_info.export_cell());
+ Node* cell = jsgraph()->Constant(
+ ObjectRef(broker(), access_info.constant()).AsCell());
value = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
@@ -2382,7 +2342,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
// Check if we need to grow the properties backing store
// with this transitioning store.
MapRef transition_map_ref(broker(), transition_map);
- transition_map_ref.SerializeBackPointer();
MapRef original_map = transition_map_ref.GetBackPointer().AsMap();
if (original_map.UnusedPropertyFields() == 0) {
DCHECK(!field_index.is_inobject());
@@ -2404,7 +2363,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
common()->BeginRegion(RegionObservability::kObservable), effect);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForMap()), receiver,
- jsgraph()->Constant(transition_map), effect, control);
+ jsgraph()->Constant(transition_map_ref), effect, control);
effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
value, effect, control);
effect = graph()->NewNode(common()->FinishRegion(),
@@ -2495,21 +2454,16 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreInArrayLiteral, node->opcode());
FeedbackParameter const& p = FeedbackParameterOf(node->op());
Node* const index = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
- // Extract receiver maps from the keyed store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access store mode from the keyed store IC.
- KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
-
- return ReduceKeyedAccess(node, index, value, nexus,
- AccessMode::kStoreInLiteral, STANDARD_LOAD,
- store_mode);
+ return ReducePropertyAccess(node, index, base::nullopt, value,
+ FeedbackSource(p.feedback()),
+ AccessMode::kStoreInLiteral);
}
Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) {
@@ -2546,8 +2500,7 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildElementAccess(
Node* receiver, Node* index, Node* value, Node* effect, Node* control,
- ElementAccessInfo const& access_info, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
+ ElementAccessInfo const& access_info, KeyedAccessMode const& keyed_mode) {
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
@@ -2583,7 +2536,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// for Chrome. Node and Electron both set this limit to 0. Setting
// the base to Smi zero here allows the EffectControlLinearizer to
// optimize away the tricky part of the access later.
- if (V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP == 0) {
+ if (JSTypedArray::kMaxSizeInHeap == 0) {
base_pointer = jsgraph()->ZeroConstant();
} else {
base_pointer = effect =
@@ -2629,8 +2582,10 @@ JSNativeContextSpecialization::BuildElementAccess(
buffer_or_receiver = buffer;
}
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
+ if ((keyed_mode.IsLoad() &&
+ keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) ||
+ (keyed_mode.IsStore() &&
+ keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS)) {
// Only check that the {index} is in SignedSmall range. We do the actual
// bounds check below and just skip the property access if it's out of
// bounds for the {receiver}.
@@ -2651,10 +2606,10 @@ JSNativeContextSpecialization::BuildElementAccess(
// Access the actual element.
ExternalArrayType external_array_type =
GetArrayTypeFromElementsKind(elements_kind);
- switch (access_mode) {
+ switch (keyed_mode.access_mode()) {
case AccessMode::kLoad: {
// Check if we can return undefined for out-of-bounds loads.
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS) {
+ if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch = graph()->NewNode(
@@ -2716,7 +2671,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// Check if we can skip the out-of-bounds store.
- if (store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
+ if (keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
@@ -2766,9 +2721,9 @@ JSNativeContextSpecialization::BuildElementAccess(
// Don't try to store to a copy-on-write backing store (unless supported by
// the store mode).
- if (access_mode == AccessMode::kStore &&
+ if (keyed_mode.access_mode() == AccessMode::kStore &&
IsSmiOrObjectElementsKind(elements_kind) &&
- !IsCOWHandlingStoreMode(store_mode)) {
+ !IsCOWHandlingStoreMode(keyed_mode.store_mode())) {
effect = graph()->NewNode(
simplified()->CheckMaps(
CheckMapsFlag::kNone,
@@ -2791,11 +2746,10 @@ JSNativeContextSpecialization::BuildElementAccess(
elements, effect, control);
// Check if we might need to grow the {elements} backing store.
- if (IsGrowStoreMode(store_mode)) {
+ if (keyed_mode.IsStore() && IsGrowStoreMode(keyed_mode.store_mode())) {
// For growing stores we validate the {index} below.
- DCHECK(access_mode == AccessMode::kStore ||
- access_mode == AccessMode::kStoreInLiteral);
- } else if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ } else if (keyed_mode.IsLoad() &&
+ keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
CanTreatHoleAsUndefined(receiver_maps)) {
    // Check that the {index} is a valid array index; we do the actual
// bounds check below and just skip the store below if it's out of
@@ -2826,7 +2780,7 @@ JSNativeContextSpecialization::BuildElementAccess(
kFullWriteBarrier, LoadSensitivity::kCritical};
// Access the actual element.
- if (access_mode == AccessMode::kLoad) {
+ if (keyed_mode.access_mode() == AccessMode::kLoad) {
// Compute the real element access type, which includes the hole in case
// of holey backing stores.
if (IsHoleyElementsKind(elements_kind)) {
@@ -2839,7 +2793,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// Check if we can return undefined for out-of-bounds loads.
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
CanTreatHoleAsUndefined(receiver_maps)) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
@@ -2923,7 +2877,7 @@ JSNativeContextSpecialization::BuildElementAccess(
effect, control);
}
}
- } else if (access_mode == AccessMode::kHas) {
+ } else if (keyed_mode.access_mode() == AccessMode::kHas) {
    // For packed arrays with NoElementsProtector valid, a bounds check
// is equivalent to HasProperty.
value = effect = graph()->NewNode(simplified()->SpeculativeNumberLessThan(
@@ -2996,8 +2950,9 @@ JSNativeContextSpecialization::BuildElementAccess(
vtrue, vfalse, control);
}
} else {
- DCHECK(access_mode == AccessMode::kStore ||
- access_mode == AccessMode::kStoreInLiteral);
+ DCHECK(keyed_mode.access_mode() == AccessMode::kStore ||
+ keyed_mode.access_mode() == AccessMode::kStoreInLiteral);
+
if (IsSmiElementsKind(elements_kind)) {
value = effect = graph()->NewNode(
simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
@@ -3011,11 +2966,11 @@ JSNativeContextSpecialization::BuildElementAccess(
// Ensure that copy-on-write backing store is writable.
if (IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode == STORE_HANDLE_COW) {
+ keyed_mode.store_mode() == STORE_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, effect, control);
- } else if (IsGrowStoreMode(store_mode)) {
+ } else if (IsGrowStoreMode(keyed_mode.store_mode())) {
// Determine the length of the {elements} backing store.
Node* elements_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
@@ -3053,7 +3008,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// If we didn't grow {elements}, it might still be COW, in which case we
// copy it now.
if (IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode == STORE_AND_GROW_HANDLE_COW) {
+ keyed_mode.store_mode() == STORE_AND_GROW_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, effect, control);
@@ -3295,7 +3250,8 @@ bool JSNativeContextSpecialization::InferReceiverMaps(
Node* receiver, Node* effect, MapHandles* receiver_maps) {
ZoneHandleSet<Map> maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect, &maps);
+ NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
+ &maps);
if (result == NodeProperties::kReliableReceiverMaps) {
for (size_t i = 0; i < maps.size(); ++i) {
receiver_maps->push_back(maps[i]);
@@ -3357,8 +3313,6 @@ SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const {
return jsgraph()->simplified();
}
-#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-
} // namespace compiler
} // namespace internal
} // namespace v8
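For orientation, here is a minimal sketch of the kind of wrapper the new keyed_mode parameter stands for: one object bundling the AccessMode with either a load mode or a store mode, which is why the separate access_mode/load_mode/store_mode arguments above could collapse into a single argument. Names and layout are assumptions for illustration, not the actual V8 definition.

#include <cassert>

enum class AccessMode { kLoad, kStore, kHas, kStoreInLiteral };
enum KeyedAccessLoadMode { STANDARD_LOAD, LOAD_IGNORE_OUT_OF_BOUNDS };
enum KeyedAccessStoreMode { STANDARD_STORE, STORE_IGNORE_OUT_OF_BOUNDS };

// Illustrative stand-in for the KeyedAccessMode used in BuildElementAccess.
class KeyedAccessMode {
 public:
  static KeyedAccessMode ForLoad(KeyedAccessLoadMode load_mode) {
    return KeyedAccessMode(AccessMode::kLoad, load_mode, STANDARD_STORE);
  }
  static KeyedAccessMode ForStore(AccessMode access, KeyedAccessStoreMode store_mode) {
    return KeyedAccessMode(access, STANDARD_LOAD, store_mode);
  }

  AccessMode access_mode() const { return access_mode_; }
  bool IsLoad() const { return access_mode_ == AccessMode::kLoad; }
  bool IsStore() const {
    return access_mode_ == AccessMode::kStore ||
           access_mode_ == AccessMode::kStoreInLiteral;
  }
  // Only meaningful for the matching kind of access, hence the asserts.
  KeyedAccessLoadMode load_mode() const { assert(IsLoad()); return load_mode_; }
  KeyedAccessStoreMode store_mode() const { assert(IsStore()); return store_mode_; }

 private:
  KeyedAccessMode(AccessMode access, KeyedAccessLoadMode load, KeyedAccessStoreMode store)
      : access_mode_(access), load_mode_(load), store_mode_(store) {}
  AccessMode access_mode_;
  KeyedAccessLoadMode load_mode_;
  KeyedAccessStoreMode store_mode_;
};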
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 7de2639966..8510c76bfc 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -7,6 +7,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/objects/map.h"
@@ -93,24 +94,15 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSToObject(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
- ElementAccessFeedback const& processed,
- AccessMode access_mode,
- KeyedAccessLoadMode load_mode,
- KeyedAccessStoreMode store_mode);
+ ElementAccessFeedback const& processed);
// In the case of non-keyed (named) accesses, pass the name as {static_name}
// and use {nullptr} for {key} (load/store modes are irrelevant).
- Reduction ReducePropertyAccessUsingProcessedFeedback(
- Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
- FeedbackNexus const& nexus, AccessMode access_mode,
- KeyedAccessLoadMode load_mode = STANDARD_LOAD,
- KeyedAccessStoreMode store_mode = STANDARD_STORE);
- Reduction ReduceKeyedAccess(Node* node, Node* key, Node* value,
- FeedbackNexus const& nexus,
- AccessMode access_mode,
- KeyedAccessLoadMode load_mode,
- KeyedAccessStoreMode store_mode);
+ Reduction ReducePropertyAccess(Node* node, Node* key,
+ base::Optional<NameRef> static_name,
+ Node* value, FeedbackSource const& source,
+ AccessMode access_mode);
Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
- FeedbackNexus const& nexus,
+ FeedbackSource const& source,
NameRef const& name,
AccessMode access_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
@@ -123,12 +115,10 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
NameRef const& name, AccessMode access_mode,
Node* key, PropertyCellRef const& property_cell);
Reduction ReduceKeyedLoadFromHeapConstant(Node* node, Node* key,
- FeedbackNexus const& nexus,
AccessMode access_mode,
KeyedAccessLoadMode load_mode);
Reduction ReduceElementAccessOnString(Node* node, Node* index, Node* value,
- AccessMode access_mode,
- KeyedAccessLoadMode load_mode);
+ KeyedAccessMode const& keyed_mode);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
Reduction ReduceJSToString(Node* node);
@@ -197,10 +187,11 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
FunctionTemplateInfoRef const& function_template_info);
// Construct the appropriate subgraph for element access.
- ValueEffectControl BuildElementAccess(
- Node* receiver, Node* index, Node* value, Node* effect, Node* control,
- ElementAccessInfo const& access_info, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode);
+ ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
+ Node* value, Node* effect,
+ Node* control,
+ ElementAccessInfo const& access_info,
+ KeyedAccessMode const& keyed_mode);
// Construct appropriate subgraph to load from a String.
Node* BuildIndexedStringLoad(Node* receiver, Node* index, Node* length,
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index a779790b8d..e0f97922b2 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -17,7 +17,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-std::ostream& operator<<(std::ostream& os, CallFrequency f) {
+std::ostream& operator<<(std::ostream& os, CallFrequency const& f) {
if (f.IsUnknown()) return os << "unknown";
return os << f.value();
}
@@ -28,7 +28,6 @@ CallFrequency CallFrequencyOf(Operator const* op) {
return OpParameter<CallFrequency>(op);
}
-
std::ostream& operator<<(std::ostream& os,
ConstructForwardVarargsParameters const& p) {
return os << p.arity() << ", " << p.start_index();
@@ -843,7 +842,8 @@ const Operator* JSOperatorBuilder::Call(size_t arity,
parameters); // parameter
}
-const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) {
+const Operator* JSOperatorBuilder::CallWithArrayLike(
+ CallFrequency const& frequency) {
return new (zone()) Operator1<CallFrequency>( // --
IrOpcode::kJSCallWithArrayLike, Operator::kNoProperties, // opcode
"JSCallWithArrayLike", // name
@@ -899,8 +899,10 @@ const Operator* JSOperatorBuilder::ConstructForwardVarargs(
parameters); // parameter
}
+// Note: frequency is taken by reference to work around a GCC bug
+// on AIX (v8:8193).
const Operator* JSOperatorBuilder::Construct(uint32_t arity,
- CallFrequency frequency,
+ CallFrequency const& frequency,
VectorSlotPair const& feedback) {
ConstructParameters parameters(arity, frequency, feedback);
return new (zone()) Operator1<ConstructParameters>( // --
@@ -911,7 +913,7 @@ const Operator* JSOperatorBuilder::Construct(uint32_t arity,
}
const Operator* JSOperatorBuilder::ConstructWithArrayLike(
- CallFrequency frequency) {
+ CallFrequency const& frequency) {
return new (zone()) Operator1<CallFrequency>( // --
IrOpcode::kJSConstructWithArrayLike, // opcode
Operator::kNoProperties, // properties
@@ -921,7 +923,8 @@ const Operator* JSOperatorBuilder::ConstructWithArrayLike(
}
const Operator* JSOperatorBuilder::ConstructWithSpread(
- uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback) {
+ uint32_t arity, CallFrequency const& frequency,
+ VectorSlotPair const& feedback) {
ConstructParameters parameters(arity, frequency, feedback);
return new (zone()) Operator1<ConstructParameters>( // --
IrOpcode::kJSConstructWithSpread, Operator::kNoProperties, // opcode
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 0f315b1cb5..e7d9acb152 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -48,7 +48,7 @@ class CallFrequency final {
}
bool operator!=(CallFrequency const& that) const { return !(*this == that); }
- friend size_t hash_value(CallFrequency f) {
+ friend size_t hash_value(CallFrequency const& f) {
return bit_cast<uint32_t>(f.value_);
}
@@ -58,7 +58,7 @@ class CallFrequency final {
float value_;
};
-std::ostream& operator<<(std::ostream&, CallFrequency);
+std::ostream& operator<<(std::ostream&, CallFrequency const&);
CallFrequency CallFrequencyOf(Operator const* op) V8_WARN_UNUSED_RESULT;
@@ -101,7 +101,7 @@ ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
// used as a parameter by JSConstruct and JSConstructWithSpread operators.
class ConstructParameters final {
public:
- ConstructParameters(uint32_t arity, CallFrequency frequency,
+ ConstructParameters(uint32_t arity, CallFrequency const& frequency,
VectorSlotPair const& feedback)
: arity_(arity), frequency_(frequency), feedback_(feedback) {}
@@ -757,7 +757,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
VectorSlotPair const& feedback = VectorSlotPair(),
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation);
- const Operator* CallWithArrayLike(CallFrequency frequency);
+ const Operator* CallWithArrayLike(CallFrequency const& frequency);
const Operator* CallWithSpread(
uint32_t arity, CallFrequency const& frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
@@ -768,11 +768,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Construct(uint32_t arity,
- CallFrequency frequency = CallFrequency(),
+ CallFrequency const& frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair());
- const Operator* ConstructWithArrayLike(CallFrequency frequency);
+ const Operator* ConstructWithArrayLike(CallFrequency const& frequency);
const Operator* ConstructWithSpread(
- uint32_t arity, CallFrequency frequency = CallFrequency(),
+ uint32_t arity, CallFrequency const& frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair());
const Operator* LoadProperty(VectorSlotPair const& feedback);
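An aside on the CallFrequency signature changes above: the type is a small immutable float wrapper, so passing it by const reference instead of by value does not change behaviour; per the comment added in js-operator.cc it only sidesteps a GCC problem on AIX (v8:8193). A rough stand-in, not the real class:

// Illustrative wrapper; the real CallFrequency lives in js-operator.h.
class CallFrequency {
 public:
  CallFrequency() : value_(-1.0f) {}                   // "unknown" frequency
  explicit CallFrequency(float value) : value_(value) {}
  bool IsUnknown() const { return value_ < 0.0f; }
  float value() const { return value_; }
 private:
  float value_;
};

// Semantically equivalent for an immutable value type; only the calling
// convention differs.
float ByValue(CallFrequency f) { return f.IsUnknown() ? 0.0f : f.value(); }
float ByConstRef(CallFrequency const& f) { return f.IsUnknown() ? 0.0f : f.value(); }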
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 9d882e8238..f3696bcc48 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -44,6 +44,25 @@ bool BinaryOperationHintToNumberOperationHint(
return false;
}
+bool BinaryOperationHintToBigIntOperationHint(
+ BinaryOperationHint binop_hint, BigIntOperationHint* bigint_hint) {
+ switch (binop_hint) {
+ case BinaryOperationHint::kSignedSmall:
+ case BinaryOperationHint::kSignedSmallInputs:
+ case BinaryOperationHint::kSigned32:
+ case BinaryOperationHint::kNumber:
+ case BinaryOperationHint::kNumberOrOddball:
+ case BinaryOperationHint::kAny:
+ case BinaryOperationHint::kNone:
+ case BinaryOperationHint::kString:
+ return false;
+ case BinaryOperationHint::kBigInt:
+ *bigint_hint = BigIntOperationHint::kBigInt;
+ return true;
+ }
+ UNREACHABLE();
+}
+
} // namespace
class JSSpeculativeBinopBuilder final {
@@ -74,6 +93,11 @@ class JSSpeculativeBinopBuilder final {
hint);
}
+ bool GetBinaryBigIntOperationHint(BigIntOperationHint* hint) {
+ return BinaryOperationHintToBigIntOperationHint(GetBinaryOperationHint(),
+ hint);
+ }
+
bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
switch (GetCompareOperationHint()) {
case CompareOperationHint::kSignedSmall:
@@ -138,6 +162,16 @@ class JSSpeculativeBinopBuilder final {
UNREACHABLE();
}
+ const Operator* SpeculativeBigIntOp(BigIntOperationHint hint) {
+ switch (op_->opcode()) {
+ case IrOpcode::kJSAdd:
+ return simplified()->SpeculativeBigIntAdd(hint);
+ default:
+ break;
+ }
+ UNREACHABLE();
+ }
+
const Operator* SpeculativeCompareOp(NumberOperationHint hint) {
switch (op_->opcode()) {
case IrOpcode::kJSEqual:
@@ -179,6 +213,16 @@ class JSSpeculativeBinopBuilder final {
return nullptr;
}
+ Node* TryBuildBigIntBinop() {
+ BigIntOperationHint hint;
+ if (GetBinaryBigIntOperationHint(&hint)) {
+ const Operator* op = SpeculativeBigIntOp(hint);
+ Node* node = BuildSpeculativeOperation(op);
+ return node;
+ }
+ return nullptr;
+ }
+
Node* TryBuildNumberCompare() {
NumberOperationHint hint;
if (GetCompareNumberOperationHint(&hint)) {
@@ -264,6 +308,15 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
operand, jsgraph()->SmiConstant(-1), effect,
control, slot);
node = b.TryBuildNumberBinop();
+ if (!node) {
+ FeedbackNexus nexus(feedback_vector(), slot);
+ if (nexus.GetBinaryOperationFeedback() ==
+ BinaryOperationHint::kBigInt) {
+ const Operator* op = jsgraph()->simplified()->SpeculativeBigIntNegate(
+ BigIntOperationHint::kBigInt);
+ node = jsgraph()->graph()->NewNode(op, operand, effect, control);
+ }
+ }
break;
}
default:
@@ -345,6 +398,11 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
if (Node* node = b.TryBuildNumberBinop()) {
return LoweringResult::SideEffectFree(node, node, control);
}
+ if (op->opcode() == IrOpcode::kJSAdd) {
+ if (Node* node = b.TryBuildBigIntBinop()) {
+ return LoweringResult::SideEffectFree(node, node, control);
+ }
+ }
break;
}
case IrOpcode::kJSExponentiate: {
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 7164a0b708..a74c019355 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -153,7 +153,8 @@ class JSTypeHintLowering {
private:
friend class JSSpeculativeBinopBuilder;
- Node* TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect, Node* control,
+ Node* TryBuildSoftDeopt(FeedbackNexus& nexus, // NOLINT(runtime/references)
+ Node* effect, Node* control,
                          DeoptimizeReason reason) const;
JSGraph* jsgraph() const { return jsgraph_; }
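To restate the BigInt feedback plumbing added in js-type-hint-lowering.cc in isolation: only BinaryOperationHint::kBigInt feedback maps to a BigInt operation hint, so the speculative BigInt lowerings (SpeculativeBigIntAdd, SpeculativeBigIntNegate) are attempted only when the recorded feedback says both inputs were BigInts. A trimmed sketch of that mapping (enum values abbreviated):

enum class BinaryOperationHint { kSignedSmall, kNumber, kBigInt, kAny /* ... */ };
enum class BigIntOperationHint { kBigInt };

// Mirrors BinaryOperationHintToBigIntOperationHint above: every hint except
// kBigInt falls through to the generic or numeric lowering paths.
bool ToBigIntOperationHint(BinaryOperationHint hint, BigIntOperationHint* out) {
  if (hint != BinaryOperationHint::kBigInt) return false;
  *out = BigIntOperationHint::kBigInt;
  return true;
}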
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index ba50b75792..3190fc9930 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -10,6 +10,7 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -1364,20 +1365,21 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
Type module_type = NodeProperties::GetType(module);
if (module_type.IsHeapConstant()) {
- ModuleRef module_constant = module_type.AsHeapConstant()->Ref().AsModule();
+ SourceTextModuleRef module_constant =
+ module_type.AsHeapConstant()->Ref().AsSourceTextModule();
CellRef cell_constant = module_constant.GetCell(cell_index);
return jsgraph()->Constant(cell_constant);
}
FieldAccess field_access;
int index;
- if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
- ModuleDescriptor::kExport) {
+ if (SourceTextModuleDescriptor::GetCellIndexKind(cell_index) ==
+ SourceTextModuleDescriptor::kExport) {
field_access = AccessBuilder::ForModuleRegularExports();
index = cell_index - 1;
} else {
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kImport);
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index),
+ SourceTextModuleDescriptor::kImport);
field_access = AccessBuilder::ForModuleRegularImports();
index = -cell_index - 1;
}
@@ -1408,9 +1410,9 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* value = NodeProperties::GetValueInput(node, 1);
- DCHECK_EQ(
- ModuleDescriptor::GetCellIndexKind(OpParameter<int32_t>(node->op())),
- ModuleDescriptor::kExport);
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(
+ OpParameter<int32_t>(node->op())),
+ SourceTextModuleDescriptor::kExport);
Node* cell = BuildGetModuleCell(node);
if (cell->op()->EffectOutputCount() > 0) effect = cell;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 8bb47b43e9..1d88a27a5f 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -137,13 +137,19 @@ bool CallDescriptor::CanTailCall(const Node* node) const {
return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
}
-int CallDescriptor::CalculateFixedFrameSize() const {
+// TODO(jkummerow, sigurds): Arguably frame size calculation should be
+// keyed on code/frame type, not on CallDescriptor kind. Think about a
+// good way to organize this logic.
+int CallDescriptor::CalculateFixedFrameSize(Code::Kind code_kind) const {
switch (kind_) {
case kCallJSFunction:
return PushArgumentCount()
? OptimizedBuiltinFrameConstants::kFixedSlotCount
: StandardFrameConstants::kFixedSlotCount;
case kCallAddress:
+ if (code_kind == Code::C_WASM_ENTRY) {
+ return CWasmEntryFrameConstants::kFixedSlotCount;
+ }
return CommonFrameConstants::kFixedSlotCountAboveFp +
CommonFrameConstants::kCPSlotCount;
case kCallCodeObject:
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index e4fa6f9f20..05eb0e7d11 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -325,7 +325,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
bool CanTailCall(const Node* call) const;
- int CalculateFixedFrameSize() const;
+ int CalculateFixedFrameSize(Code::Kind code_kind) const;
RegList AllocatableRegisters() const { return allocatable_registers_; }
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index c42bfd839a..f9998723f3 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -419,14 +419,15 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
}
void LoadElimination::AbstractState::FieldsMerge(
- AbstractFields& this_fields, AbstractFields const& that_fields,
+ AbstractFields* this_fields, AbstractFields const& that_fields,
Zone* zone) {
- for (size_t i = 0; i < this_fields.size(); ++i) {
- if (this_fields[i]) {
+ for (size_t i = 0; i < this_fields->size(); ++i) {
+ AbstractField const*& this_field = (*this_fields)[i];
+ if (this_field) {
if (that_fields[i]) {
- this_fields[i] = this_fields[i]->Merge(that_fields[i], zone);
+ this_field = this_field->Merge(that_fields[i], zone);
} else {
- this_fields[i] = nullptr;
+ this_field = nullptr;
}
}
}
@@ -442,8 +443,8 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that,
}
// Merge the information we have about the fields.
- FieldsMerge(this->fields_, that->fields_, zone);
- FieldsMerge(this->const_fields_, that->const_fields_, zone);
+ FieldsMerge(&this->fields_, that->fields_, zone);
+ FieldsMerge(&this->const_fields_, that->const_fields_, zone);
// Merge the information we have about the maps.
if (this->maps_) {
@@ -923,20 +924,23 @@ Reduction LoadElimination::ReduceStoreField(Node* node,
FieldInfo const* lookup_result =
state->LookupField(object, field_index, constness);
- if (lookup_result && constness == PropertyConstness::kMutable) {
+ if (lookup_result && (constness == PropertyConstness::kMutable ||
+ V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL)) {
// At runtime, we should never encounter
// - any store replacing existing info with a different, incompatible
// representation, nor
// - two consecutive const stores.
// However, we may see such code statically, so we guard against
// executing it by emitting Unreachable.
- // TODO(gsps): Re-enable the double const store check once we have
- // identified other FieldAccesses that should be marked mutable
- // instead of const (cf. JSCreateLowering::AllocateFastLiteral).
+ // TODO(gsps): Re-enable the double const store check even for
+ // non-debug builds once we have identified other FieldAccesses
+ // that should be marked mutable instead of const
+ // (cf. JSCreateLowering::AllocateFastLiteral).
bool incompatible_representation =
!lookup_result->name.is_null() &&
!IsCompatible(representation, lookup_result->representation);
- if (incompatible_representation) {
+ if (incompatible_representation ||
+ constness == PropertyConstness::kConst) {
Node* control = NodeProperties::GetControlInput(node);
Node* unreachable =
graph()->NewNode(common()->Unreachable(), effect, control);
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 7658d01365..4ad1fa64a2 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -233,7 +233,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
bool FieldsEquals(AbstractFields const& this_fields,
AbstractFields const& that_fields) const;
- void FieldsMerge(AbstractFields& this_fields,
+ void FieldsMerge(AbstractFields* this_fields,
AbstractFields const& that_fields, Zone* zone);
AbstractElements const* elements_ = nullptr;
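The FieldsMerge change above is purely a calling-convention cleanup: an argument the callee mutates is now passed by pointer rather than by non-const reference, so the mutation is visible at the call site (the NOLINT(runtime/references) annotations elsewhere in this patch mark the cases that were left alone). A generic sketch of the idiom, not V8 code:

#include <cstddef>
#include <vector>

// Merge `those` into `*these`; the pointer parameter means the call site reads
// MergeCounts(&counts, other), signalling that `counts` will be modified.
void MergeCounts(std::vector<int>* these, const std::vector<int>& those) {
  for (std::size_t i = 0; i < these->size() && i < those.size(); ++i) {
    (*these)[i] += those[i];
  }
}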
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index d6b88b13f5..41d50549b3 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -4,6 +4,7 @@
#include "src/compiler/loop-analysis.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
@@ -12,6 +13,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
#define OFFSET(x) ((x)&0x1F)
@@ -51,7 +55,8 @@ struct TempLoopInfo {
// marks on edges into/out-of the loop header nodes.
class LoopFinderImpl {
public:
- LoopFinderImpl(Graph* graph, LoopTree* loop_tree, Zone* zone)
+ LoopFinderImpl(Graph* graph, LoopTree* loop_tree, TickCounter* tick_counter,
+ Zone* zone)
: zone_(zone),
end_(graph->end()),
queue_(zone),
@@ -63,7 +68,8 @@ class LoopFinderImpl {
loops_found_(0),
width_(0),
backward_(nullptr),
- forward_(nullptr) {}
+ forward_(nullptr),
+ tick_counter_(tick_counter) {}
void Run() {
PropagateBackward();
@@ -116,6 +122,7 @@ class LoopFinderImpl {
int width_;
uint32_t* backward_;
uint32_t* forward_;
+ TickCounter* const tick_counter_;
int num_nodes() {
return static_cast<int>(loop_tree_->node_to_loop_num_.size());
@@ -183,6 +190,7 @@ class LoopFinderImpl {
Queue(end_);
while (!queue_.empty()) {
+ tick_counter_->DoTick();
Node* node = queue_.front();
info(node);
queue_.pop_front();
@@ -301,6 +309,7 @@ class LoopFinderImpl {
}
// Propagate forward on paths that were backward reachable from backedges.
while (!queue_.empty()) {
+ tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop_front();
queued_.Set(node, false);
@@ -512,11 +521,11 @@ class LoopFinderImpl {
}
};
-
-LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) {
+LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
+ Zone* zone) {
LoopTree* loop_tree =
new (graph->zone()) LoopTree(graph->NodeCount(), graph->zone());
- LoopFinderImpl finder(graph, loop_tree, zone);
+ LoopFinderImpl finder(graph, loop_tree, tick_counter, zone);
finder.Run();
if (FLAG_trace_turbo_loop) {
finder.Print();
@@ -524,7 +533,6 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) {
return loop_tree;
}
-
Node* LoopTree::HeaderNode(Loop* loop) {
Node* first = *HeaderNodes(loop).begin();
if (first->opcode() == IrOpcode::kLoop) return first;
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 620a9554e0..043833a54c 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -13,6 +13,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// TODO(titzer): don't assume entry edges have a particular index.
@@ -156,7 +159,8 @@ class LoopTree : public ZoneObject {
class V8_EXPORT_PRIVATE LoopFinder {
public:
// Build a loop tree for the entire graph.
- static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone);
+ static LoopTree* BuildLoopTree(Graph* graph, TickCounter* tick_counter,
+ Zone* temp_zone);
};
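The TickCounter parameter threaded into BuildLoopTree above lets long-running fixpoint passes report progress: each trip through the worklist calls DoTick(). A minimal sketch of the pattern, with TickCounter as a stand-in for the real class in src/codegen/tick-counter.h:

#include <cstddef>
#include <deque>

class TickCounter {
 public:
  void DoTick() { ++ticks_; }            // the real version can also enforce limits
  std::size_t ticks() const { return ticks_; }
 private:
  std::size_t ticks_ = 0;
};

// Generic worklist loop in the style of PropagateBackward/PropagateForward:
// one tick per dequeued node so the analysis is observable from outside.
template <typename Node, typename Visit>
void RunToFixpoint(std::deque<Node>* queue, TickCounter* tick_counter, Visit visit) {
  while (!queue->empty()) {
    tick_counter->DoTick();
    Node n = queue->front();
    queue->pop_front();
    visit(n, queue);                     // visit may enqueue more work
  }
}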
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index f8e78b2169..80205f80b6 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -240,6 +240,7 @@ class MachineRepresentationInferrer {
MachineType::PointerRepresentation();
break;
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
@@ -428,6 +429,7 @@ class MachineRepresentationChecker {
MachineRepresentation::kWord64);
break;
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
case IrOpcode::kTaggedPoisonOnSpeculation:
CheckValueInputIsTagged(node, 0);
break;
@@ -556,7 +558,7 @@ class MachineRepresentationChecker {
case IrOpcode::kParameter:
case IrOpcode::kProjection:
break;
- case IrOpcode::kDebugAbort:
+ case IrOpcode::kAbortCSAAssert:
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kLoad:
@@ -700,6 +702,7 @@ class MachineRepresentationChecker {
case IrOpcode::kThrow:
case IrOpcode::kTypedStateValues:
case IrOpcode::kFrameState:
+ case IrOpcode::kStaticAssert:
break;
default:
if (node->op()->ValueInputCount() != 0) {
@@ -748,6 +751,11 @@ class MachineRepresentationChecker {
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressedSigned:
return;
+ case MachineRepresentation::kNone:
+ if (input->opcode() == IrOpcode::kCompressedHeapConstant) {
+ return;
+ }
+ break;
default:
break;
}
@@ -851,6 +859,9 @@ class MachineRepresentationChecker {
case MachineRepresentation::kCompressedPointer:
return;
case MachineRepresentation::kNone: {
+ if (input->opcode() == IrOpcode::kCompressedHeapConstant) {
+ return;
+ }
std::ostringstream str;
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index a6a8e87cf4..f720c29084 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -710,7 +710,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceFloat64Compare(node);
case IrOpcode::kFloat64RoundDown:
return ReduceFloat64RoundDown(node);
- case IrOpcode::kBitcastTaggedToWord: {
+ case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord: {
NodeMatcher m(node->InputAt(0));
if (m.IsBitcastWordToTaggedSigned()) {
RelaxEffectsAndControls(node);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index d2ddedc8fa..f447861aca 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -140,6 +140,7 @@ MachineType AtomicOpType(Operator const* op) {
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastTaggedSignedToWord, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -244,6 +245,13 @@ MachineType AtomicOpType(Operator const* op) {
V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
+ V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Eq, Operator::kCommutative, 2, 0, 1) \
+ V(F64x2Ne, Operator::kCommutative, 2, 0, 1) \
+ V(F64x2Lt, Operator::kNoProperties, 2, 0, 1) \
+ V(F64x2Le, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
@@ -261,6 +269,17 @@ MachineType AtomicOpType(Operator const* op) {
V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \
V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Le, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2Add, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2Mul, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Eq, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Ne, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GeU, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertF32x4, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \
@@ -338,6 +357,8 @@ MachineType AtomicOpType(Operator const* op) {
V(S128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
V(S128Select, Operator::kNoProperties, 3, 0, 1) \
+ V(S1x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
@@ -439,12 +460,15 @@ MachineType AtomicOpType(Operator const* op) {
V(Exchange)
#define SIMD_LANE_OP_LIST(V) \
+ V(F64x2, 2) \
V(F32x4, 4) \
+ V(I64x2, 2) \
V(I32x4, 4) \
V(I16x8, 8) \
V(I8x16, 16)
#define SIMD_FORMAT_LIST(V) \
+ V(64x2, 64) \
V(32x4, 32) \
V(16x8, 16) \
V(8x16, 8)
@@ -754,6 +778,14 @@ struct MachineOperatorGlobalCache {
};
Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
+ struct MemoryBarrierOperator : public Operator {
+ MemoryBarrierOperator()
+ : Operator(IrOpcode::kMemoryBarrier,
+ Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0,
+ 1, 1, 0, 1, 0) {}
+ };
+ MemoryBarrierOperator kMemoryBarrier;
+
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
// might decide to split these operators, thus potentially creating live
@@ -807,12 +839,12 @@ struct MachineOperatorGlobalCache {
};
Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
- struct DebugAbortOperator : public Operator {
- DebugAbortOperator()
- : Operator(IrOpcode::kDebugAbort, Operator::kNoThrow, "DebugAbort", 1,
- 1, 1, 0, 1, 0) {}
+ struct AbortCSAAssertOperator : public Operator {
+ AbortCSAAssertOperator()
+ : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
+ "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
};
- DebugAbortOperator kDebugAbort;
+ AbortCSAAssertOperator kAbortCSAAssert;
struct DebugBreakOperator : public Operator {
DebugBreakOperator()
@@ -1005,8 +1037,8 @@ const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() {
return &cache_.kBitcastMaybeObjectToWord;
}
-const Operator* MachineOperatorBuilder::DebugAbort() {
- return &cache_.kDebugAbort;
+const Operator* MachineOperatorBuilder::AbortCSAAssert() {
+ return &cache_.kAbortCSAAssert;
}
const Operator* MachineOperatorBuilder::DebugBreak() {
@@ -1017,6 +1049,10 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
return new (zone_) CommentOperator(msg);
}
+const Operator* MachineOperatorBuilder::MemBarrier() {
+ return &cache_.kMemoryBarrier;
+}
+
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
LoadRepresentation rep) {
#define LOAD(Type) \
@@ -1300,6 +1336,11 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
2, 0, 0, 1, 0, 0, array);
}
+const uint8_t* S8x16ShuffleOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kS8x16Shuffle, op->opcode());
+ return OpParameter<uint8_t*>(op);
+}
+
#undef PURE_BINARY_OP_LIST_32
#undef PURE_BINARY_OP_LIST_64
#undef MACHINE_PURE_OP_LIST
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 8b1250dd30..0f81301206 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -112,6 +112,9 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE const uint8_t* S8x16ShuffleOf(Operator const* op)
+ V8_WARN_UNUSED_RESULT;
+
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -216,7 +219,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
AlignmentRequirements::FullUnalignedAccessSupport());
const Operator* Comment(const char* msg);
- const Operator* DebugAbort();
+ const Operator* AbortCSAAssert();
const Operator* DebugBreak();
const Operator* UnsafePointerAdd();
@@ -295,9 +298,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Uint64LessThanOrEqual();
const Operator* Uint64Mod();
- // This operator reinterprets the bits of a tagged pointer as word.
+ // This operator reinterprets the bits of a tagged pointer as a word.
const Operator* BitcastTaggedToWord();
+ // This operator reinterprets the bits of a Smi as a word.
+ const Operator* BitcastTaggedSignedToWord();
+
// This operator reinterprets the bits of a tagged MaybeObject pointer as
// word.
const Operator* BitcastMaybeObjectToWord();
@@ -462,6 +468,16 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Float64SilenceNaN();
// SIMD operators.
+ const Operator* F64x2Splat();
+ const Operator* F64x2Abs();
+ const Operator* F64x2Neg();
+ const Operator* F64x2ExtractLane(int32_t);
+ const Operator* F64x2ReplaceLane(int32_t);
+ const Operator* F64x2Eq();
+ const Operator* F64x2Ne();
+ const Operator* F64x2Lt();
+ const Operator* F64x2Le();
+
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
const Operator* F32x4ReplaceLane(int32_t);
@@ -483,6 +499,23 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4Lt();
const Operator* F32x4Le();
+ const Operator* I64x2Splat();
+ const Operator* I64x2ExtractLane(int32_t);
+ const Operator* I64x2ReplaceLane(int32_t);
+ const Operator* I64x2Neg();
+ const Operator* I64x2Shl(int32_t);
+ const Operator* I64x2ShrS(int32_t);
+ const Operator* I64x2Add();
+ const Operator* I64x2Sub();
+ const Operator* I64x2Mul();
+ const Operator* I64x2Eq();
+ const Operator* I64x2Ne();
+ const Operator* I64x2GtS();
+ const Operator* I64x2GeS();
+ const Operator* I64x2ShrU(int32_t);
+ const Operator* I64x2GtU();
+ const Operator* I64x2GeU();
+
const Operator* I32x4Splat();
const Operator* I32x4ExtractLane(int32_t);
const Operator* I32x4ReplaceLane(int32_t);
@@ -585,6 +618,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S8x16Shuffle(const uint8_t shuffle[16]);
+ const Operator* S1x2AnyTrue();
+ const Operator* S1x2AllTrue();
const Operator* S1x4AnyTrue();
const Operator* S1x4AllTrue();
const Operator* S1x8AnyTrue();
@@ -620,6 +655,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
+ // Memory barrier.
+ const Operator* MemBarrier();
+
// atomic-load [base + index]
const Operator* Word32AtomicLoad(LoadRepresentation rep);
// atomic-load [base + index]
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
index f43ba0d155..07ac95b4f7 100644
--- a/deps/v8/src/compiler/map-inference.cc
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -19,7 +19,7 @@ MapInference::MapInference(JSHeapBroker* broker, Node* object, Node* effect)
: broker_(broker), object_(object) {
ZoneHandleSet<Map> maps;
auto result =
- NodeProperties::InferReceiverMaps(broker_, object_, effect, &maps);
+ NodeProperties::InferReceiverMapsUnsafe(broker_, object_, effect, &maps);
maps_.insert(maps_.end(), maps.begin(), maps.end());
maps_state_ = (result == NodeProperties::kUnreliableReceiverMaps)
? kUnreliableDontNeedGuard
@@ -65,21 +65,25 @@ bool MapInference::AllOfInstanceTypes(std::function<bool(InstanceType)> f) {
bool MapInference::AllOfInstanceTypesUnsafe(
std::function<bool(InstanceType)> f) const {
- // TODO(neis): Brokerize the MapInference.
- AllowHandleDereference allow_handle_deref;
CHECK(HaveMaps());
- return std::all_of(maps_.begin(), maps_.end(),
- [f](Handle<Map> map) { return f(map->instance_type()); });
+ auto instance_type = [this, f](Handle<Map> map) {
+ MapRef map_ref(broker_, map);
+ return f(map_ref.instance_type());
+ };
+ return std::all_of(maps_.begin(), maps_.end(), instance_type);
}
bool MapInference::AnyOfInstanceTypesUnsafe(
std::function<bool(InstanceType)> f) const {
- AllowHandleDereference allow_handle_deref;
CHECK(HaveMaps());
- return std::any_of(maps_.begin(), maps_.end(),
- [f](Handle<Map> map) { return f(map->instance_type()); });
+ auto instance_type = [this, f](Handle<Map> map) {
+ MapRef map_ref(broker_, map);
+ return f(map_ref.instance_type());
+ };
+
+ return std::any_of(maps_.begin(), maps_.end(), instance_type);
}
MapHandles const& MapInference::GetMaps() {
@@ -122,7 +126,10 @@ bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies,
const VectorSlotPair& feedback) {
if (Safe()) return true;
- auto is_stable = [](Handle<Map> map) { return map->is_stable(); };
+ auto is_stable = [this](Handle<Map> map) {
+ MapRef map_ref(broker_, map);
+ return map_ref.is_stable();
+ };
if (dependencies != nullptr &&
std::all_of(maps_.cbegin(), maps_.cend(), is_stable)) {
for (Handle<Map> map : maps_) {
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 29cbb4d26c..368c060c1d 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/memory-optimizer.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -20,7 +21,8 @@ namespace compiler {
MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
- const char* function_debug_name)
+ const char* function_debug_name,
+ TickCounter* tick_counter)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
@@ -29,7 +31,8 @@ MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
graph_assembler_(jsgraph, nullptr, nullptr, zone),
poisoning_level_(poisoning_level),
allocation_folding_(allocation_folding),
- function_debug_name_(function_debug_name) {}
+ function_debug_name_(function_debug_name),
+ tick_counter_(tick_counter) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
@@ -99,7 +102,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kComment:
- case IrOpcode::kDebugAbort:
+ case IrOpcode::kAbortCSAAssert:
case IrOpcode::kDebugBreak:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
@@ -108,6 +111,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoad:
case IrOpcode::kLoadElement:
case IrOpcode::kLoadField:
+ case IrOpcode::kLoadFromObject:
case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
@@ -118,6 +122,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStore:
case IrOpcode::kStoreElement:
case IrOpcode::kStoreField:
+ case IrOpcode::kStoreToObject:
case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
@@ -214,6 +219,7 @@ Node* EffectPhiForPhi(Node* phi) {
} // namespace
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
+ tick_counter_->DoTick();
DCHECK(!node->IsDead());
DCHECK_LT(0, node->op()->EffectInputCount());
switch (node->opcode()) {
@@ -296,6 +302,21 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
}
}
+ Node* allocate_builtin;
+ if (allocation_type == AllocationType::kYoung) {
+ if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInYoungGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
+ }
+ } else {
+ if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInOldGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
+ }
+ }
+
// Determine the top/limit addresses.
Node* top_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
@@ -371,11 +392,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ Bind(&call_runtime);
{
- Node* target = allocation_type == AllocationType::kYoung
- ? __
- AllocateInYoungGenerationStubConstant()
- : __
- AllocateInOldGenerationStubConstant();
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -384,7 +400,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
allocate_operator_.set(common()->Call(call_descriptor));
}
Node* vfalse = __ BitcastTaggedToWord(
- __ Call(allocate_operator_.get(), target, size));
+ __ Call(allocate_operator_.get(), allocate_builtin, size));
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
__ Goto(&done, vfalse);
}
@@ -434,11 +450,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
- Node* target = allocation_type == AllocationType::kYoung
- ? __
- AllocateInYoungGenerationStubConstant()
- : __
- AllocateInOldGenerationStubConstant();
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -446,7 +457,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
- __ Goto(&done, __ Call(allocate_operator_.get(), target, size));
+ __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
__ Bind(&done);
value = done.PhiAt(0);
@@ -483,8 +494,6 @@ void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
- Node* offset = node->InputAt(1);
- node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag)));
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
EnqueueUses(node, state);
}
@@ -494,9 +503,7 @@ void MemoryOptimizer::VisitStoreToObject(Node* node,
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
- Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
- node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag)));
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
NodeProperties::ChangeOp(
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index cbefcb67de..71f33fa3d7 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -10,6 +10,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -36,7 +39,7 @@ class MemoryOptimizer final {
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
- const char* function_debug_name);
+ const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
void Optimize();
@@ -158,6 +161,7 @@ class MemoryOptimizer final {
PoisoningMitigationLevel poisoning_level_;
AllocationFolding allocation_folding_;
const char* function_debug_name_;
+ TickCounter* const tick_counter_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
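The VisitAllocateRaw change in memory-optimizer.cc above hoists the choice of slow-path allocation builtin to the top of the function and adds "regular" (non-large-object) variants. Restated as a plain function, purely for readability; enum and stub names follow the diff:

enum class AllocationType { kYoung, kOld };
enum class AllowLargeObjects { kFalse, kTrue };
enum class AllocateStub {
  kAllocateRegularInYoungGeneration, kAllocateInYoungGeneration,
  kAllocateRegularInOldGeneration,   kAllocateInOldGeneration,
};

// Same decision tree as the new code in VisitAllocateRaw.
AllocateStub ChooseAllocateStub(AllocationType type, AllowLargeObjects large) {
  if (type == AllocationType::kYoung) {
    return large == AllowLargeObjects::kTrue
               ? AllocateStub::kAllocateInYoungGeneration
               : AllocateStub::kAllocateRegularInYoungGeneration;
  }
  return large == AllowLargeObjects::kTrue
             ? AllocateStub::kAllocateInOldGeneration
             : AllocateStub::kAllocateRegularInOldGeneration;
}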
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index d6528c553a..1e00ec00f4 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -5,6 +5,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/map-inference.h"
@@ -392,7 +393,7 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
}
// static
-NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
+NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return) {
HeapObjectMatcher m(receiver);
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 4a23b6781d..a660fe7022 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -151,7 +151,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
kReliableReceiverMaps, // Receiver maps can be trusted.
kUnreliableReceiverMaps // Receiver maps might have changed (side-effect).
};
- static InferReceiverMapsResult InferReceiverMaps(
+ // DO NOT USE InferReceiverMapsUnsafe IN NEW CODE. Use MapInference instead.
+ static InferReceiverMapsResult InferReceiverMapsUnsafe(
JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return);
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 50cfdf6248..7688379e9f 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -303,7 +303,13 @@ void Node::Print() const {
void Node::Print(std::ostream& os) const {
os << *this << std::endl;
for (Node* input : this->inputs()) {
- os << " " << *input << std::endl;
+ os << " ";
+ if (input) {
+ os << *input;
+ } else {
+ os << "(NULL)";
+ }
+ os << std::endl;
}
}
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 9ac8ec581f..d621e23e3a 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -45,6 +45,7 @@
V(NumberConstant) \
V(PointerConstant) \
V(HeapConstant) \
+ V(CompressedHeapConstant) \
V(RelocatableInt32Constant) \
V(RelocatableInt64Constant)
@@ -231,6 +232,7 @@
 // Opcodes for VirtualMachine-level operators.
#define SIMPLIFIED_CHANGE_OP_LIST(V) \
+ V(ChangeCompressedSignedToInt32) \
V(ChangeTaggedSignedToInt32) \
V(ChangeTaggedSignedToInt64) \
V(ChangeTaggedToInt32) \
@@ -240,6 +242,7 @@
V(ChangeTaggedToTaggedSigned) \
V(ChangeCompressedToTaggedSigned) \
V(ChangeTaggedToCompressedSigned) \
+ V(ChangeInt31ToCompressedSigned) \
V(ChangeInt31ToTaggedSigned) \
V(ChangeInt32ToTagged) \
V(ChangeInt64ToTagged) \
@@ -249,6 +252,8 @@
V(ChangeFloat64ToTaggedPointer) \
V(ChangeTaggedToBit) \
V(ChangeBitToTagged) \
+ V(ChangeUint64ToBigInt) \
+ V(TruncateBigIntToUint64) \
V(TruncateTaggedToWord32) \
V(TruncateTaggedToFloat64) \
V(TruncateTaggedToBit) \
@@ -262,6 +267,7 @@
V(CheckedUint32Div) \
V(CheckedUint32Mod) \
V(CheckedInt32Mul) \
+ V(CheckedInt32ToCompressedSigned) \
V(CheckedInt32ToTaggedSigned) \
V(CheckedInt64ToInt32) \
V(CheckedInt64ToTaggedSigned) \
@@ -318,6 +324,8 @@
V(NumberMin) \
V(NumberPow)
+#define SIMPLIFIED_BIGINT_BINOP_LIST(V) V(BigIntAdd)
+
#define SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
V(SpeculativeNumberAdd) \
V(SpeculativeNumberSubtract) \
@@ -369,6 +377,11 @@
V(NumberToUint8Clamped) \
V(NumberSilenceNaN)
+#define SIMPLIFIED_BIGINT_UNOP_LIST(V) \
+ V(BigIntAsUintN) \
+ V(BigIntNegate) \
+ V(CheckBigInt)
+
#define SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) V(SpeculativeToNumber)
#define SIMPLIFIED_OTHER_OP_LIST(V) \
@@ -382,6 +395,7 @@
V(StringCodePointAt) \
V(StringFromSingleCharCode) \
V(StringFromSingleCodePoint) \
+ V(StringFromCodePointAt) \
V(StringIndexOf) \
V(StringLength) \
V(StringToLowerCaseIntl) \
@@ -461,16 +475,24 @@
V(FindOrderedHashMapEntryForInt32Key) \
V(PoisonIndex) \
V(RuntimeAbort) \
+ V(AssertType) \
V(DateNow)
+#define SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) V(SpeculativeBigIntAdd)
+#define SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) V(SpeculativeBigIntNegate)
+
#define SIMPLIFIED_OP_LIST(V) \
SIMPLIFIED_CHANGE_OP_LIST(V) \
SIMPLIFIED_CHECKED_OP_LIST(V) \
SIMPLIFIED_COMPARE_BINOP_LIST(V) \
SIMPLIFIED_NUMBER_BINOP_LIST(V) \
+ SIMPLIFIED_BIGINT_BINOP_LIST(V) \
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
SIMPLIFIED_NUMBER_UNOP_LIST(V) \
+ SIMPLIFIED_BIGINT_UNOP_LIST(V) \
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) \
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) \
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) \
SIMPLIFIED_OTHER_OP_LIST(V)
// Opcodes for Machine-level operators.
@@ -616,7 +638,7 @@
MACHINE_FLOAT64_BINOP_LIST(V) \
MACHINE_FLOAT64_UNOP_LIST(V) \
MACHINE_WORD64_ATOMIC_OP_LIST(V) \
- V(DebugAbort) \
+ V(AbortCSAAssert) \
V(DebugBreak) \
V(Comment) \
V(Load) \
@@ -631,6 +653,7 @@
V(Word64ReverseBytes) \
V(Int64AbsWithOverflow) \
V(BitcastTaggedToWord) \
+ V(BitcastTaggedSignedToWord) \
V(BitcastWordToTagged) \
V(BitcastWordToTaggedSigned) \
V(TruncateFloat64ToWord32) \
@@ -692,6 +715,7 @@
V(Word32PairSar) \
V(ProtectedLoad) \
V(ProtectedStore) \
+ V(MemoryBarrier) \
V(Word32AtomicLoad) \
V(Word32AtomicStore) \
V(Word32AtomicExchange) \
@@ -718,6 +742,15 @@
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
+ V(F64x2Splat) \
+ V(F64x2ExtractLane) \
+ V(F64x2ReplaceLane) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -739,6 +772,22 @@
V(F32x4Le) \
V(F32x4Gt) \
V(F32x4Ge) \
+ V(I64x2Splat) \
+ V(I64x2ExtractLane) \
+ V(I64x2ReplaceLane) \
+ V(I64x2Neg) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
+ V(I64x2ShrU) \
+ V(I64x2GtU) \
+ V(I64x2GeU) \
V(I32x4Splat) \
V(I32x4ExtractLane) \
V(I32x4ReplaceLane) \
@@ -844,6 +893,8 @@
V(S128Xor) \
V(S128Select) \
V(S8x16Shuffle) \
+ V(S1x2AnyTrue) \
+ V(S1x2AllTrue) \
V(S1x4AnyTrue) \
V(S1x4AllTrue) \
V(S1x8AnyTrue) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 475623f76b..8cb991ceb7 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/operation-typer.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
#include "src/execution/isolate.h"
@@ -259,7 +260,8 @@ Type OperationTyper::ConvertReceiver(Type type) {
type = Type::Intersect(type, Type::Receiver(), zone());
if (maybe_primitive) {
// ConvertReceiver maps null and undefined to the JSGlobalProxy of the
- // target function, and all other primitives are wrapped into a JSValue.
+ // target function, and all other primitives are wrapped into a
+ // JSPrimitiveWrapper.
type = Type::Union(type, Type::OtherObject(), zone());
}
return type;
@@ -577,6 +579,13 @@ Type OperationTyper::NumberSilenceNaN(Type type) {
return type;
}
+Type OperationTyper::BigIntAsUintN(Type type) {
+ DCHECK(type.Is(Type::BigInt()));
+ return Type::BigInt();
+}
+
+Type OperationTyper::CheckBigInt(Type type) { return Type::BigInt(); }
+
Type OperationTyper::NumberAdd(Type lhs, Type rhs) {
DCHECK(lhs.Is(Type::Number()));
DCHECK(rhs.Is(Type::Number()));
@@ -1111,6 +1120,26 @@ SPECULATIVE_NUMBER_BINOP(NumberShiftRight)
SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical)
#undef SPECULATIVE_NUMBER_BINOP
+Type OperationTyper::BigIntAdd(Type lhs, Type rhs) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ return Type::BigInt();
+}
+
+Type OperationTyper::BigIntNegate(Type type) {
+ if (type.IsNone()) return type;
+ return Type::BigInt();
+}
+
+Type OperationTyper::SpeculativeBigIntAdd(Type lhs, Type rhs) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ return Type::BigInt();
+}
+
+Type OperationTyper::SpeculativeBigIntNegate(Type type) {
+ if (type.IsNone()) return type;
+ return Type::BigInt();
+}
+
Type OperationTyper::SpeculativeToNumber(Type type) {
return ToNumber(Type::Intersect(type, Type::NumberOrOddball(), zone()));
}
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index a905662ad1..728e297a1b 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -43,14 +43,18 @@ class V8_EXPORT_PRIVATE OperationTyper {
// Unary operators.
#define DECLARE_METHOD(Name) Type Name(Type type);
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD)
DECLARE_METHOD(ConvertReceiver)
#undef DECLARE_METHOD
-// Number binary operators.
+// Numeric binary operators.
#define DECLARE_METHOD(Name) Type Name(Type lhs, Type rhs);
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
// Comparison operators.
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index e771cef123..eb060b71e1 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -16,6 +16,7 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
+#include "src/compiler/add-type-assertions-reducer.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
#include "src/compiler/backend/instruction-selector.h"
@@ -34,6 +35,7 @@
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
+#include "src/compiler/csa-load-elimination.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/decompression-elimination.h"
#include "src/compiler/effect-control-linearizer.h"
@@ -114,7 +116,8 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, ZONE_NAME),
codegen_zone_(codegen_zone_scope_.zone()),
- broker_(new JSHeapBroker(isolate_, info_->zone())),
+ broker_(new JSHeapBroker(isolate_, info_->zone(),
+ info_->trace_heap_broker_enabled())),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(AssemblerOptions::Default(isolate)) {
@@ -266,7 +269,7 @@ class PipelineData {
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
MachineGraph* mcgraph() const { return mcgraph_; }
- Handle<Context> native_context() const {
+ Handle<NativeContext> native_context() const {
return handle(info()->native_context(), isolate());
}
Handle<JSGlobalObject> global_object() const {
@@ -324,7 +327,8 @@ class PipelineData {
Typer* CreateTyper() {
DCHECK_NULL(typer_);
- typer_ = new Typer(broker(), typer_flags_, graph());
+ typer_ =
+ new Typer(broker(), typer_flags_, graph(), &info()->tick_counter());
return typer_;
}
@@ -397,7 +401,8 @@ class PipelineData {
DCHECK_NULL(frame_);
int fixed_frame_size = 0;
if (call_descriptor != nullptr) {
- fixed_frame_size = call_descriptor->CalculateFixedFrameSize();
+ fixed_frame_size =
+ call_descriptor->CalculateFixedFrameSize(info()->code_kind());
}
frame_ = new (codegen_zone()) Frame(fixed_frame_size);
}
@@ -408,7 +413,8 @@ class PipelineData {
DCHECK_NULL(register_allocation_data_);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
- sequence(), flags, debug_name());
+ sequence(), flags, &info()->tick_counter(),
+ debug_name());
}
void InitializeOsrHelper() {
@@ -1040,6 +1046,119 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
code->set_can_have_weak_objects(true);
}
+class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
+ public:
+ WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone, Graph* graph,
+ Code::Kind kind,
+ std::unique_ptr<char[]> debug_name,
+ const AssemblerOptions& options,
+ SourcePositionTable* source_positions)
+ // Note that the OptimizedCompilationInfo is not initialized at the time
+ // we pass it to the CompilationJob constructor, but it is not
+ // dereferenced there.
+ : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_,
+ "TurboFan"),
+ debug_name_(std::move(debug_name)),
+ info_(CStrVector(debug_name_.get()), graph->zone(), kind),
+ call_descriptor_(call_descriptor),
+ zone_stats_(isolate->allocator()),
+ zone_(std::move(zone)),
+ graph_(graph),
+ data_(&zone_stats_, &info_, isolate, graph_, nullptr, source_positions,
+ new (zone_.get()) NodeOriginTable(graph_), nullptr, options),
+ pipeline_(&data_) {}
+
+ ~WasmHeapStubCompilationJob() = default;
+
+ protected:
+ Status PrepareJobImpl(Isolate* isolate) final;
+ Status ExecuteJobImpl() final;
+ Status FinalizeJobImpl(Isolate* isolate) final;
+
+ private:
+ std::unique_ptr<char[]> debug_name_;
+ OptimizedCompilationInfo info_;
+ CallDescriptor* call_descriptor_;
+ ZoneStats zone_stats_;
+ std::unique_ptr<Zone> zone_;
+ Graph* graph_;
+ PipelineData data_;
+ PipelineImpl pipeline_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob);
+};
+
+// static
+std::unique_ptr<OptimizedCompilationJob>
+Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate,
+ CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone,
+ Graph* graph, Code::Kind kind,
+ std::unique_ptr<char[]> debug_name,
+ const AssemblerOptions& options,
+ SourcePositionTable* source_positions) {
+ return base::make_unique<WasmHeapStubCompilationJob>(
+ isolate, call_descriptor, std::move(zone), graph, kind,
+ std::move(debug_name), options, source_positions);
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
+ Isolate* isolate) {
+ std::unique_ptr<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
+ pipeline_statistics.reset(new PipelineStatistics(
+ &info_, isolate->GetTurboStatistics(), &zone_stats_));
+ pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
+ }
+ if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) {
+ CodeTracer::Scope tracing_scope(data_.GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Begin compiling method " << info_.GetDebugName().get()
+ << " using TurboFan" << std::endl;
+ }
+ if (info_.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ StdoutStream{} << "-- wasm stub " << Code::Kind2String(info_.code_kind())
+ << " graph -- " << std::endl
+ << AsRPO(*data_.graph());
+ }
+
+ if (info_.trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(&info_, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info_.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ }
+ pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
+ return CompilationJob::SUCCEEDED;
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
+ pipeline_.ComputeScheduledGraph();
+ if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
+ return CompilationJob::SUCCEEDED;
+ }
+ return CompilationJob::FAILED;
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
+ Isolate* isolate) {
+ Handle<Code> code;
+ if (pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code) &&
+ pipeline_.CommitDependencies(code)) {
+ info_.SetCode(code);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code) {
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ code->Disassemble(compilation_info()->GetDebugName().get(), os);
+ }
+#endif
+ return SUCCEEDED;
+ }
+ return FAILED;
+}
+
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
PipelineRunScope scope(this->data_, Phase::phase_name());
@@ -1065,7 +1184,7 @@ struct GraphBuilderPhase {
handle(data->info()->closure()->feedback_vector(), data->isolate()),
data->info()->osr_offset(), data->jsgraph(), frequency,
data->source_positions(), data->native_context(),
- SourcePosition::kNotInlined, flags);
+ SourcePosition::kNotInlined, flags, &data->info()->tick_counter());
}
};
@@ -1102,7 +1221,7 @@ struct InliningPhase {
void Run(PipelineData* data, Zone* temp_zone) {
Isolate* isolate = data->isolate();
OptimizedCompilationInfo* info = data->info();
- GraphReducer graph_reducer(temp_zone, data->graph(),
+ GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1196,6 +1315,7 @@ struct UntyperPhase {
}
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
RemoveTypeReducer remove_type_reducer;
AddReducer(data, &graph_reducer, &remove_type_reducer);
@@ -1216,6 +1336,7 @@ struct CopyMetadataForConcurrentCompilePhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
JSHeapCopyReducer heap_copy_reducer(data->broker());
AddReducer(data, &graph_reducer, &heap_copy_reducer);
@@ -1242,13 +1363,13 @@ struct SerializationPhase {
if (data->info()->is_source_positions_enabled()) {
flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
}
- if (data->info()->is_osr()) {
- flags |= SerializerForBackgroundCompilationFlag::kOsr;
+ if (data->info()->is_analyze_environment_liveness()) {
+ flags |=
+ SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
}
- SerializerForBackgroundCompilation serializer(
- data->broker(), data->dependencies(), temp_zone,
- data->info()->closure(), flags);
- serializer.Run();
+ RunSerializerForBackgroundCompilation(data->broker(), data->dependencies(),
+ temp_zone, data->info()->closure(),
+ flags, data->info()->osr_offset());
}
};
@@ -1257,6 +1378,7 @@ struct TypedLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1292,9 +1414,12 @@ struct EscapeAnalysisPhase {
static const char* phase_name() { return "V8.TFEscapeAnalysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- EscapeAnalysis escape_analysis(data->jsgraph(), temp_zone);
+ EscapeAnalysis escape_analysis(data->jsgraph(),
+ &data->info()->tick_counter(), temp_zone);
escape_analysis.ReduceGraph();
- GraphReducer reducer(temp_zone, data->graph(), data->jsgraph()->Dead());
+ GraphReducer reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
escape_analysis.analysis_result(),
temp_zone);
@@ -1305,13 +1430,28 @@ struct EscapeAnalysisPhase {
}
};
+struct TypeAssertionsPhase {
+ static const char* phase_name() { return "V8.TFTypeAssertions"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
+ AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
+ temp_zone);
+ AddReducer(data, &graph_reducer, &type_assertions);
+ graph_reducer.ReduceGraph();
+ }
+};
+
struct SimplifiedLoweringPhase {
static const char* phase_name() { return "V8.TFSimplifiedLowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel());
+ data->info()->GetPoisoningMitigationLevel(),
+ &data->info()->tick_counter());
lowering.LowerAllNodes();
}
};
@@ -1325,8 +1465,8 @@ struct LoopPeelingPhase {
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
- LoopTree* loop_tree =
- LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
+ LoopTree* loop_tree = LoopFinder::BuildLoopTree(
+ data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
data->source_positions(), data->node_origins())
.PeelInnerLoopsOfTree();
@@ -1346,6 +1486,7 @@ struct GenericLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer);
AddReducer(data, &graph_reducer, &generic_lowering);
@@ -1358,6 +1499,7 @@ struct EarlyOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1384,7 +1526,8 @@ struct ControlFlowOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
ControlFlowOptimizer optimizer(data->graph(), data->common(),
- data->machine(), temp_zone);
+ data->machine(),
+ &data->info()->tick_counter(), temp_zone);
optimizer.Optimize();
}
};
@@ -1406,8 +1549,9 @@ struct EffectControlLinearizationPhase {
// fix the effect and control flow for nodes with low-level side
// effects (such as changing representation to tagged or
// 'floating' allocation regions.)
- Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
- Scheduler::kTempSchedule);
+ Schedule* schedule = Scheduler::ComputeSchedule(
+ temp_zone, data->graph(), Scheduler::kTempSchedule,
+ &data->info()->tick_counter());
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
TraceSchedule(data->info(), data, schedule,
"effect linearization schedule");
@@ -1433,6 +1577,7 @@ struct EffectControlLinearizationPhase {
// doing a common operator reducer and dead code elimination just before
// it, to eliminate conditional deopts with a constant condition.
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1455,7 +1600,8 @@ struct StoreStoreEliminationPhase {
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
- StoreStoreElimination::Run(data->jsgraph(), temp_zone);
+ StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(),
+ temp_zone);
}
};
@@ -1464,6 +1610,7 @@ struct LoadEliminationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1513,7 +1660,7 @@ struct MemoryOptimizationPhase {
data->info()->is_allocation_folding_enabled()
? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
: MemoryOptimizer::AllocationFolding::kDontAllocationFolding,
- data->debug_name());
+ data->debug_name(), &data->info()->tick_counter());
optimizer.Optimize();
}
};
@@ -1523,6 +1670,7 @@ struct LateOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1555,6 +1703,7 @@ struct MachineOperatorOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
@@ -1565,11 +1714,38 @@ struct MachineOperatorOptimizationPhase {
}
};
+struct CsaEarlyOptimizationPhase {
+ static const char* phase_name() { return "V8.CSAEarlyOptimization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common(), temp_zone);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->broker(), data->common(),
+ data->machine(), temp_zone);
+ ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
+ CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
+ temp_zone);
+ AddReducer(data, &graph_reducer, &branch_condition_elimination);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
+ AddReducer(data, &graph_reducer, &load_elimination);
+ graph_reducer.ReduceGraph();
+ }
+};
+
struct CsaOptimizationPhase {
static const char* phase_name() { return "V8.CSAOptimization"; }
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1621,9 +1797,10 @@ struct ComputeSchedulePhase {
void Run(PipelineData* data, Zone* temp_zone) {
Schedule* schedule = Scheduler::ComputeSchedule(
- temp_zone, data->graph(), data->info()->is_splitting_enabled()
- ? Scheduler::kSplitNodes
- : Scheduler::kNoFlags);
+ temp_zone, data->graph(),
+ data->info()->is_splitting_enabled() ? Scheduler::kSplitNodes
+ : Scheduler::kNoFlags,
+ &data->info()->tick_counter());
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
data->set_schedule(schedule);
}
@@ -1671,6 +1848,7 @@ struct InstructionSelectionPhase {
data->info()->switch_jump_table_enabled()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
+ &data->info()->tick_counter(),
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,
@@ -1920,7 +2098,8 @@ struct PrintGraphPhase {
Schedule* schedule = data->schedule();
if (schedule == nullptr) {
schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
- Scheduler::kNoFlags);
+ Scheduler::kNoFlags,
+ &info->tick_counter());
}
AllowHandleDereference allow_deref;
@@ -2089,6 +2268,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
}
+ if (FLAG_assert_types) {
+ Run<TypeAssertionsPhase>();
+ RunPrintAndVerify(TypeAssertionsPhase::phase_name());
+ }
+
// Perform simplified lowering. This has to run w/o the Typer decorator,
// because we cannot compute meaningful types anyways, and the computed types
// might even conflict with the representation/truncation logic.
@@ -2201,6 +2385,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<PrintGraphPhase>("V8.TFMachineCode");
}
+ pipeline.Run<CsaEarlyOptimizationPhase>();
+ pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
+
// Optimize memory access and allocation operations.
pipeline.Run<MemoryOptimizationPhase>();
pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
@@ -2331,58 +2518,6 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
}
// static
-MaybeHandle<Code> Pipeline::GenerateCodeForWasmHeapStub(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Code::Kind kind, const char* debug_name, const AssemblerOptions& options,
- SourcePositionTable* source_positions) {
- OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
- // Construct a pipeline for scheduling and code generation.
- ZoneStats zone_stats(isolate->allocator());
- NodeOriginTable* node_positions = new (graph->zone()) NodeOriginTable(graph);
- PipelineData data(&zone_stats, &info, isolate, graph, nullptr,
- source_positions, node_positions, nullptr, options);
- std::unique_ptr<PipelineStatistics> pipeline_statistics;
- if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(new PipelineStatistics(
- &info, isolate->GetTurboStatistics(), &zone_stats));
- pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
- }
-
- PipelineImpl pipeline(&data);
-
- if (info.trace_turbo_json_enabled() ||
- info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << info.GetDebugName().get()
- << " using TurboFan" << std::endl;
- }
-
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
- << std::endl
- << AsRPO(*graph);
- }
-
- if (info.trace_turbo_json_enabled()) {
- TurboJsonFile json_of(&info, std::ios_base::trunc);
- json_of << "{\"function\":\"" << info.GetDebugName().get()
- << "\", \"source\":\"\",\n\"phases\":[";
- }
-
- pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
- pipeline.ComputeScheduledGraph();
-
- Handle<Code> code;
- if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
- pipeline.CommitDependencies(code)) {
- return code;
- }
- return MaybeHandle<Code>();
-}
-
-// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
std::unique_ptr<JSHeapBroker>* out_broker) {
@@ -2449,11 +2584,11 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
}
// static
-OptimizedCompilationJob* Pipeline::NewCompilationJob(
+std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
Isolate* isolate, Handle<JSFunction> function, bool has_script) {
Handle<SharedFunctionInfo> shared =
handle(function->shared(), function->GetIsolate());
- return new PipelineCompilationJob(isolate, shared, function);
+ return base::make_unique<PipelineCompilationJob>(isolate, shared, function);
}
// static
@@ -2490,13 +2625,14 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
data.BeginPhaseKind("V8.WasmOptimization");
- const bool is_asm_js = module->origin == wasm::kAsmJsOrigin;
+ const bool is_asm_js = is_asmjs_module(module);
if (FLAG_turbo_splitting && !is_asm_js) {
data.info()->MarkAsSplittingEnabled();
}
if (FLAG_wasm_opt || is_asm_js) {
PipelineRunScope scope(&data, "V8.WasmFullOptimization");
GraphReducer graph_reducer(scope.zone(), data.graph(),
+ &data.info()->tick_counter(),
data.mcgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
data.common(), scope.zone());
@@ -2515,6 +2651,7 @@ void Pipeline::GenerateCodeForWasmFunction(
} else {
PipelineRunScope scope(&data, "V8.WasmBaseOptimization");
GraphReducer graph_reducer(scope.zone(), data.graph(),
+ &data.info()->tick_counter(),
data.mcgraph()->Dead());
ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
AddReducer(&data, &graph_reducer, &value_numbering);
@@ -2870,8 +3007,9 @@ bool PipelineImpl::SelectInstructionsAndAssemble(
}
MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
- if (!SelectInstructionsAndAssemble(call_descriptor))
+ if (!SelectInstructionsAndAssemble(call_descriptor)) {
return MaybeHandle<Code>();
+ }
return FinalizeCode();
}
@@ -2928,6 +3066,9 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
if (data->info()->is_turbo_preprocess_ranges()) {
flags |= RegisterAllocationFlag::kTurboPreprocessRanges;
}
+ if (data->info()->trace_turbo_allocation_enabled()) {
+ flags |= RegisterAllocationFlag::kTraceAllocation;
+ }
data->InitializeRegisterAllocationData(config, call_descriptor, flags);
if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
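The new WasmHeapStubCompilationJob above follows the usual three-phase OptimizedCompilationJob shape: PrepareJobImpl sets up statistics and tracing, ExecuteJobImpl schedules the graph and selects instructions, and FinalizeJobImpl commits the generated code object. A rough, illustrative sketch of that driver shape, with hypothetical demo types rather than V8's actual job API:

// Illustrative-only sketch (hypothetical types, not V8's CompilationJob API):
// Prepare and Finalize run where heap access is allowed; Execute does the
// heavy graph work in between.
#include <iostream>

enum class Status { SUCCEEDED, FAILED };

class DemoJob {
 public:
  Status Prepare() {
    std::cout << "prepare: set up tracing/statistics\n";
    return Status::SUCCEEDED;
  }
  Status Execute() {
    std::cout << "execute: schedule graph, select instructions\n";
    return Status::SUCCEEDED;
  }
  Status Finalize() {
    std::cout << "finalize: commit code object and dependencies\n";
    return Status::SUCCEEDED;
  }
};

int main() {
  DemoJob job;
  if (job.Prepare() != Status::SUCCEEDED) return 1;
  if (job.Execute() != Status::SUCCEEDED) return 1;
  if (job.Finalize() != Status::SUCCEEDED) return 1;
  return 0;
}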
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 7f9a242d98..6898faaad0 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -41,9 +41,8 @@ class SourcePositionTable;
class Pipeline : public AllStatic {
public:
// Returns a new compilation job for the given JavaScript function.
- static OptimizedCompilationJob* NewCompilationJob(Isolate* isolate,
- Handle<JSFunction> function,
- bool has_script);
+ static std::unique_ptr<OptimizedCompilationJob> NewCompilationJob(
+ Isolate* isolate, Handle<JSFunction> function, bool has_script);
// Run the pipeline for the WebAssembly compilation info.
static void GenerateCodeForWasmFunction(
@@ -60,11 +59,11 @@ class Pipeline : public AllStatic {
const char* debug_name, const AssemblerOptions& assembler_options,
SourcePositionTable* source_positions = nullptr);
- // Run the pipeline on a machine graph and generate code.
- static MaybeHandle<Code> GenerateCodeForWasmHeapStub(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Code::Kind kind, const char* debug_name,
- const AssemblerOptions& assembler_options,
+ // Returns a new compilation job for a wasm heap stub.
+ static std::unique_ptr<OptimizedCompilationJob> NewWasmHeapStubCompilationJob(
+ Isolate* isolate, CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone, Graph* graph, Code::Kind kind,
+ std::unique_ptr<char[]> debug_name, const AssemblerOptions& options,
SourcePositionTable* source_positions = nullptr);
// Run the pipeline on a machine graph and generate code.
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index dafd481797..99a06ef874 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -127,7 +127,7 @@ Node* PropertyAccessBuilder::ResolveHolder(
PropertyAccessInfo const& access_info, Node* receiver) {
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- return jsgraph()->Constant(holder);
+ return jsgraph()->Constant(ObjectRef(broker(), holder));
}
return receiver;
}
@@ -151,7 +151,16 @@ MachineRepresentation PropertyAccessBuilder::ConvertRepresentation(
Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
NameRef const& name, PropertyAccessInfo const& access_info,
Node* receiver) {
+ // TODO(neis): Eliminate FastPropertyAt call below by doing the lookup during
+ // access info computation. Requires extra care in the case where the
+ // receiver is the holder.
+ AllowCodeDependencyChange dependency_change_;
+ AllowHandleAllocation handle_allocation_;
+ AllowHandleDereference handle_dereference_;
+ AllowHeapAllocation heap_allocation_;
+
if (!access_info.IsDataConstant()) return nullptr;
+
// First, determine if we have a constant holder to load from.
Handle<JSObject> holder;
// If {access_info} has a holder, just use it.
@@ -165,7 +174,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
MapRef receiver_map = m.Ref(broker()).map();
if (std::find_if(access_info.receiver_maps().begin(),
access_info.receiver_maps().end(), [&](Handle<Map> map) {
- return map.address() == receiver_map.object().address();
+ return map.equals(receiver_map.object());
}) == access_info.receiver_maps().end()) {
// The map of the receiver is not in the feedback, let us bail out.
return nullptr;
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index dc1edc710d..277c89c932 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -556,8 +556,8 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3,
current_block_ = nullptr;
}
-void RawMachineAssembler::DebugAbort(Node* message) {
- AddNode(machine()->DebugAbort(), message);
+void RawMachineAssembler::AbortCSAAssert(Node* message) {
+ AddNode(machine()->AbortCSAAssert(), message);
}
void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 67326ac730..890c38c551 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -732,6 +732,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* BitcastTaggedToWord(Node* a) {
return AddNode(machine()->BitcastTaggedToWord(), a);
}
+ Node* BitcastTaggedSignedToWord(Node* a) {
+ return AddNode(machine()->BitcastTaggedSignedToWord(), a);
+ }
Node* BitcastMaybeObjectToWord(Node* a) {
return AddNode(machine()->BitcastMaybeObjectToWord(), a);
}
@@ -1016,7 +1019,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
- void DebugAbort(Node* message);
+ void AbortCSAAssert(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const std::string& msg);
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 0822e47bba..9b401bcf43 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -19,6 +19,7 @@ RedundancyElimination::~RedundancyElimination() = default;
Reduction RedundancyElimination::Reduce(Node* node) {
if (node_checks_.Get(node)) return NoChange();
switch (node->opcode()) {
+ case IrOpcode::kCheckBigInt:
case IrOpcode::kCheckBounds:
case IrOpcode::kCheckEqualsInternalizedString:
case IrOpcode::kCheckEqualsSymbol:
@@ -147,7 +148,9 @@ bool CheckSubsumes(Node const* a, Node const* b) {
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
case IrOpcode::kCheckNumber:
+ case IrOpcode::kCheckBigInt:
break;
+ case IrOpcode::kCheckedInt32ToCompressedSigned:
case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedInt64ToInt32:
case IrOpcode::kCheckedInt64ToTaggedSigned:
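The CheckBigInt entries added here let redundancy elimination treat a later CheckBigInt on the same value as subsumed by an earlier, dominating one. A toy sketch of that idea in plain C++ (hypothetical names, not V8's node-graph implementation):

// Illustrative-only sketch: an earlier check of the same kind on the same
// value makes a later, equally strict check redundant.
#include <cassert>
#include <map>
#include <utility>

enum class CheckKind { kCheckBigInt, kCheckNumber, kCheckString };

struct CheckCache {
  // Remembers the first check of a given kind on a given value id.
  std::map<std::pair<int, CheckKind>, int> first_check;

  // Returns the check to use: a previously recorded dominating check if one
  // exists (the new one is redundant), otherwise the freshly inserted one.
  int Lookup(int value_id, CheckKind kind, int new_check_id) {
    auto key = std::make_pair(value_id, kind);
    auto it = first_check.find(key);
    if (it != first_check.end()) return it->second;  // subsumed by earlier check
    first_check[key] = new_check_id;
    return new_check_id;
  }
};

int main() {
  CheckCache cache;
  int first = cache.Lookup(/*value_id=*/7, CheckKind::kCheckBigInt, /*new_check_id=*/100);
  int second = cache.Lookup(7, CheckKind::kCheckBigInt, 200);
  assert(first == 100 && second == 100);  // second CheckBigInt folds into the first
  return 0;
}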
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index cebd87e73d..7a4577b799 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/type-cache.h"
@@ -25,12 +26,14 @@ const char* Truncation::description() const {
return "truncate-to-bool";
case TruncationKind::kWord32:
return "truncate-to-word32";
- case TruncationKind::kFloat64:
+ case TruncationKind::kWord64:
+ return "truncate-to-word64";
+ case TruncationKind::kOddballAndBigIntToNumber:
switch (identify_zeros()) {
case kIdentifyZeros:
- return "truncate-to-float64 (identify zeros)";
+ return "truncate-oddball&bigint-to-number (identify zeros)";
case kDistinguishZeros:
- return "truncate-to-float64 (distinguish zeros)";
+ return "truncate-oddball&bigint-to-number (distinguish zeros)";
}
case TruncationKind::kAny:
switch (identify_zeros()) {
@@ -45,22 +48,25 @@ const char* Truncation::description() const {
// Partial order for truncations:
//
-// kAny <-------+
-// ^ |
-// | |
-// kFloat64 |
-// ^ |
-// / |
-// kWord32 kBool
-// ^ ^
-// \ /
-// \ /
-// \ /
-// \ /
-// \ /
-// kNone
+// kAny <-------+
+// ^ |
+// | |
+// kOddballAndBigIntToNumber |
+// ^ |
+// / |
+// kWord64 |
+// ^ |
+// | |
+// kWord32 kBool
+// ^ ^
+// \ /
+// \ /
+// \ /
+// \ /
+// \ /
+// kNone
//
-// TODO(jarin) We might consider making kBool < kFloat64.
+// TODO(jarin) We might consider making kBool < kOddballAndBigIntToNumber.
// static
Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
@@ -68,9 +74,9 @@ Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
if (LessGeneral(rep1, rep2)) return rep2;
if (LessGeneral(rep2, rep1)) return rep1;
// Handle the generalization of float64-representable values.
- if (LessGeneral(rep1, TruncationKind::kFloat64) &&
- LessGeneral(rep2, TruncationKind::kFloat64)) {
- return TruncationKind::kFloat64;
+ if (LessGeneral(rep1, TruncationKind::kOddballAndBigIntToNumber) &&
+ LessGeneral(rep2, TruncationKind::kOddballAndBigIntToNumber)) {
+ return TruncationKind::kOddballAndBigIntToNumber;
}
// Handle the generalization of any-representable values.
if (LessGeneral(rep1, TruncationKind::kAny) &&
@@ -101,9 +107,16 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
return rep2 == TruncationKind::kBool || rep2 == TruncationKind::kAny;
case TruncationKind::kWord32:
return rep2 == TruncationKind::kWord32 ||
- rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
- case TruncationKind::kFloat64:
- return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ rep2 == TruncationKind::kWord64 ||
+ rep2 == TruncationKind::kOddballAndBigIntToNumber ||
+ rep2 == TruncationKind::kAny;
+ case TruncationKind::kWord64:
+ return rep2 == TruncationKind::kWord64 ||
+ rep2 == TruncationKind::kOddballAndBigIntToNumber ||
+ rep2 == TruncationKind::kAny;
+ case TruncationKind::kOddballAndBigIntToNumber:
+ return rep2 == TruncationKind::kOddballAndBigIntToNumber ||
+ rep2 == TruncationKind::kAny;
case TruncationKind::kAny:
return rep2 == TruncationKind::kAny;
}
@@ -125,10 +138,11 @@ bool IsWord(MachineRepresentation rep) {
} // namespace
-RepresentationChanger::RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
+RepresentationChanger::RepresentationChanger(JSGraph* jsgraph,
+ JSHeapBroker* broker)
: cache_(TypeCache::Get()),
jsgraph_(jsgraph),
- isolate_(isolate),
+ broker_(broker),
testing_type_errors_(false),
type_error_(false) {}
@@ -169,7 +183,8 @@ Node* RepresentationChanger::GetRepresentationFor(
use_node, use_info);
case MachineRepresentation::kTaggedPointer:
DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
- use_info.type_check() == TypeCheckKind::kHeapObject);
+ use_info.type_check() == TypeCheckKind::kHeapObject ||
+ use_info.type_check() == TypeCheckKind::kBigInt);
return GetTaggedPointerRepresentationFor(node, output_rep, output_type,
use_node, use_info);
case MachineRepresentation::kTagged:
@@ -207,7 +222,8 @@ Node* RepresentationChanger::GetRepresentationFor(
use_info);
case MachineRepresentation::kWord64:
DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
- use_info.type_check() == TypeCheckKind::kSigned64);
+ use_info.type_check() == TypeCheckKind::kSigned64 ||
+ use_info.type_check() == TypeCheckKind::kBigInt);
return GetWord64RepresentationFor(node, output_rep, output_type, use_node,
use_info);
case MachineRepresentation::kSimd128:
@@ -418,6 +434,8 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
op = machine()->ChangeInt64ToFloat64();
node = jsgraph()->graph()->NewNode(op, node);
op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else if (output_type.Is(Type::BigInt())) {
+ op = simplified()->ChangeUint64ToBigInt();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
@@ -447,16 +465,37 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
// TODO(turbofan): Consider adding a Bailout operator that just deopts
// for TaggedSigned output representation.
op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback());
+ } else if (IsAnyTagged(output_rep) &&
+ (use_info.type_check() == TypeCheckKind::kBigInt ||
+ output_type.Is(Type::BigInt()))) {
+ if (output_type.Is(Type::BigInt())) {
+ return node;
+ }
+ op = simplified()->CheckBigInt(use_info.feedback());
} else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ if (use_info.type_check() == TypeCheckKind::kBigInt &&
+ !output_type.Is(Type::BigInt())) {
+ node = InsertChangeCompressedToTagged(node);
+ op = simplified()->CheckBigInt(use_info.feedback());
+ } else {
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ }
+ } else if (output_rep == MachineRepresentation::kCompressed &&
+ output_type.Is(Type::BigInt())) {
op = machine()->ChangeCompressedPointerToTaggedPointer();
+ } else if (output_rep == MachineRepresentation::kCompressed &&
+ use_info.type_check() == TypeCheckKind::kBigInt) {
+ node = InsertChangeCompressedToTagged(node);
+ op = simplified()->CheckBigInt(use_info.feedback());
} else if (CanBeCompressedSigned(output_rep) &&
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type.Maybe(Type::SignedSmall())) {
op = machine()->ChangeCompressedPointerToTaggedPointer();
+ } else {
+ // TODO(turbofan): Consider adding a Bailout operator that just deopts
+ // for CompressedSigned output representation.
+ op = simplified()->CheckedCompressedToTaggedPointer(use_info.feedback());
}
- // TODO(turbofan): Consider adding a Bailout operator that just deopts
- // for CompressedSigned output representation.
- op = simplified()->CheckedCompressedToTaggedPointer(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
@@ -535,6 +574,9 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
} else if (output_type.Is(cache_->kSafeInteger)) {
// int64 -> tagged
op = simplified()->ChangeInt64ToTagged();
+ } else if (output_type.Is(Type::BigInt())) {
+ // uint64 -> BigInt
+ op = simplified()->ChangeUint64ToBigInt();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
@@ -560,7 +602,7 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
op = simplified()->ChangeUint32ToTagged();
} else if (output_type.Is(Type::Number()) ||
(output_type.Is(Type::NumberOrOddball()) &&
- truncation.IsUsedAsFloat64())) {
+ truncation.TruncatesOddballAndBigIntToNumber())) {
op = simplified()->ChangeFloat64ToTagged(
output_type.Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
@@ -569,7 +611,11 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
}
- } else if (IsAnyCompressed(output_rep)) {
+ } else if (output_rep == MachineRepresentation::kCompressedSigned) {
+ op = machine()->ChangeCompressedSignedToTaggedSigned();
+ } else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ } else if (output_rep == MachineRepresentation::kCompressed) {
op = machine()->ChangeCompressedToTagged();
} else {
return TypeError(node, output_rep, output_type,
@@ -606,9 +652,20 @@ Node* RepresentationChanger::GetCompressedSignedRepresentationFor(
use_node, use_info);
op = machine()->ChangeTaggedSignedToCompressedSigned();
} else if (IsWord(output_rep)) {
- node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
- use_node, use_info);
- op = machine()->ChangeTaggedSignedToCompressedSigned();
+ if (output_type.Is(Type::Signed31())) {
+ op = simplified()->ChangeInt31ToCompressedSigned();
+ } else if (output_type.Is(Type::Signed32())) {
+ if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback());
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kCompressedSigned);
+ }
+ } else {
+ node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = machine()->ChangeTaggedSignedToCompressedSigned();
+ }
} else if (output_rep == MachineRepresentation::kWord64) {
node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
use_node, use_info);
@@ -645,10 +702,11 @@ Node* RepresentationChanger::GetCompressedPointerRepresentationFor(
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type.Maybe(Type::SignedSmall())) {
op = machine()->ChangeTaggedPointerToCompressedPointer();
+ } else {
+ // TODO(turbofan): Consider adding a Bailout operator that just deopts
+ // for TaggedSigned output representation.
+ op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback());
}
- // TODO(turbofan): Consider adding a Bailout operator that just deopts
- // for TaggedSigned output representation.
- op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback());
} else if (output_rep == MachineRepresentation::kBit) {
// TODO(v8:8977): specialize here and below
node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
@@ -810,11 +868,14 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
Node* use_node, UseInfo use_info) {
NumberMatcher m(node);
if (m.HasValue()) {
+ // BigInts are not used as number constants.
+ DCHECK(use_info.type_check() != TypeCheckKind::kBigInt);
switch (use_info.type_check()) {
case TypeCheckKind::kNone:
case TypeCheckKind::kNumber:
case TypeCheckKind::kNumberOrOddball:
return jsgraph()->Float64Constant(m.Value());
+ case TypeCheckKind::kBigInt:
case TypeCheckKind::kHeapObject:
case TypeCheckKind::kSigned32:
case TypeCheckKind::kSigned64:
@@ -843,9 +904,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
}
} else if (output_rep == MachineRepresentation::kBit) {
CHECK(output_type.Is(Type::Boolean()));
- // TODO(tebbi): TypeCheckKind::kNumberOrOddball should imply Float64
- // truncation, since this exactly means that we treat Oddballs as Numbers.
- if (use_info.truncation().IsUsedAsFloat64() ||
+ if (use_info.truncation().TruncatesOddballAndBigIntToNumber() ||
use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = machine()->ChangeUint32ToFloat64();
} else {
@@ -867,7 +926,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
} else if (output_type.Is(Type::Number())) {
op = simplified()->ChangeTaggedToFloat64();
} else if ((output_type.Is(Type::NumberOrOddball()) &&
- use_info.truncation().IsUsedAsFloat64()) ||
+ use_info.truncation().TruncatesOddballAndBigIntToNumber()) ||
output_type.Is(Type::NumberOrHole())) {
// JavaScript 'null' is an Oddball that results in +0 when truncated to
// Number. In a context like -0 == null, which must evaluate to false,
@@ -1063,11 +1122,15 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
output_type, use_node, use_info);
} else if (output_rep == MachineRepresentation::kCompressedSigned) {
// TODO(v8:8977): Specialise here
- op = machine()->ChangeCompressedSignedToTaggedSigned();
- node = jsgraph()->graph()->NewNode(op, node);
- return GetWord32RepresentationFor(node,
- MachineRepresentation::kTaggedSigned,
- output_type, use_node, use_info);
+ if (output_type.Is(Type::SignedSmall())) {
+ op = simplified()->ChangeCompressedSignedToInt32();
+ } else {
+ op = machine()->ChangeCompressedSignedToTaggedSigned();
+ node = jsgraph()->graph()->NewNode(op, node);
+ return GetWord32RepresentationFor(node,
+ MachineRepresentation::kTaggedSigned,
+ output_type, use_node, use_info);
+ }
} else if (output_rep == MachineRepresentation::kCompressedPointer) {
// TODO(v8:8977): Specialise here
op = machine()->ChangeCompressedPointerToTaggedPointer();
@@ -1252,6 +1315,15 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
}
break;
}
+ case IrOpcode::kHeapConstant: {
+ HeapObjectMatcher m(node);
+ if (m.HasValue() && m.Ref(broker_).IsBigInt()) {
+ auto bigint = m.Ref(broker_).AsBigInt();
+ return jsgraph()->Int64Constant(
+ static_cast<int64_t>(bigint.AsUint64()));
+ }
+ break;
+ }
default:
break;
}
@@ -1272,9 +1344,15 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
unreachable);
} else if (IsWord(output_rep)) {
- if (output_type.Is(Type::Unsigned32())) {
+ if (output_type.Is(Type::Unsigned32OrMinusZero())) {
+ // uint32 -> uint64
+ CHECK_IMPLIES(output_type.Maybe(Type::MinusZero()),
+ use_info.truncation().IdentifiesZeroAndMinusZero());
op = machine()->ChangeUint32ToUint64();
- } else if (output_type.Is(Type::Signed32())) {
+ } else if (output_type.Is(Type::Signed32OrMinusZero())) {
+ // int32 -> int64
+ CHECK_IMPLIES(output_type.Maybe(Type::MinusZero()),
+ use_info.truncation().IdentifiesZeroAndMinusZero());
op = machine()->ChangeInt32ToInt64();
} else {
return TypeError(node, output_rep, output_type,
@@ -1323,6 +1401,13 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
+ } else if (IsAnyTagged(output_rep) &&
+ use_info.truncation().IsUsedAsWord64() &&
+ (use_info.type_check() == TypeCheckKind::kBigInt ||
+ output_type.Is(Type::BigInt()))) {
+ node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = simplified()->TruncateBigIntToUint64();
} else if (CanBeTaggedPointer(output_rep)) {
if (output_type.Is(cache_->kInt64)) {
op = simplified()->ChangeTaggedToInt64();
@@ -1656,6 +1741,13 @@ Node* RepresentationChanger::InsertTruncateInt64ToInt32(Node* node) {
return jsgraph()->graph()->NewNode(machine()->TruncateInt64ToInt32(), node);
}
+Node* RepresentationChanger::InsertChangeCompressedToTagged(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeCompressedToTagged(),
+ node);
+}
+
+Isolate* RepresentationChanger::isolate() const { return broker_->isolate(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
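The reworked truncation lattice (kNone below everything; kWord32 < kWord64 < kOddballAndBigIntToNumber < kAny; kBool only below kAny) can be exercised in isolation. The following standalone sketch mirrors the LessGeneral/Generalize rules from this hunk and checks a few joins; it is illustrative only, not V8 code:

// Standalone sketch of the truncation partial order drawn in the comment above.
#include <cassert>

enum class Kind { kNone, kBool, kWord32, kWord64, kOddballAndBigIntToNumber, kAny };

bool LessGeneral(Kind a, Kind b) {
  switch (a) {
    case Kind::kNone:
      return true;
    case Kind::kBool:
      return b == Kind::kBool || b == Kind::kAny;
    case Kind::kWord32:
      return b == Kind::kWord32 || b == Kind::kWord64 ||
             b == Kind::kOddballAndBigIntToNumber || b == Kind::kAny;
    case Kind::kWord64:
      return b == Kind::kWord64 || b == Kind::kOddballAndBigIntToNumber ||
             b == Kind::kAny;
    case Kind::kOddballAndBigIntToNumber:
      return b == Kind::kOddballAndBigIntToNumber || b == Kind::kAny;
    case Kind::kAny:
      return b == Kind::kAny;
  }
  return false;
}

Kind Generalize(Kind a, Kind b) {
  if (LessGeneral(a, b)) return b;
  if (LessGeneral(b, a)) return a;
  if (LessGeneral(a, Kind::kOddballAndBigIntToNumber) &&
      LessGeneral(b, Kind::kOddballAndBigIntToNumber)) {
    return Kind::kOddballAndBigIntToNumber;  // join of number-like truncations
  }
  return Kind::kAny;  // only remaining upper bound
}

int main() {
  assert(Generalize(Kind::kWord32, Kind::kWord64) == Kind::kWord64);
  assert(Generalize(Kind::kWord32, Kind::kBool) == Kind::kAny);
  assert(Generalize(Kind::kWord64, Kind::kBool) == Kind::kAny);
  return 0;
}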
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index e8bb3f12ac..d338667603 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -29,8 +29,13 @@ class Truncation final {
static Truncation Word32() {
return Truncation(TruncationKind::kWord32, kIdentifyZeros);
}
- static Truncation Float64(IdentifyZeros identify_zeros = kDistinguishZeros) {
- return Truncation(TruncationKind::kFloat64, identify_zeros);
+ static Truncation Word64() {
+ return Truncation(TruncationKind::kWord64, kIdentifyZeros);
+ }
+ static Truncation OddballAndBigIntToNumber(
+ IdentifyZeros identify_zeros = kDistinguishZeros) {
+ return Truncation(TruncationKind::kOddballAndBigIntToNumber,
+ identify_zeros);
}
static Truncation Any(IdentifyZeros identify_zeros = kDistinguishZeros) {
return Truncation(TruncationKind::kAny, identify_zeros);
@@ -50,8 +55,11 @@ class Truncation final {
bool IsUsedAsWord32() const {
return LessGeneral(kind_, TruncationKind::kWord32);
}
- bool IsUsedAsFloat64() const {
- return LessGeneral(kind_, TruncationKind::kFloat64);
+ bool IsUsedAsWord64() const {
+ return LessGeneral(kind_, TruncationKind::kWord64);
+ }
+ bool TruncatesOddballAndBigIntToNumber() const {
+ return LessGeneral(kind_, TruncationKind::kOddballAndBigIntToNumber);
}
bool IdentifiesUndefinedAndZero() {
return LessGeneral(kind_, TruncationKind::kWord32) ||
@@ -81,13 +89,15 @@ class Truncation final {
kNone,
kBool,
kWord32,
- kFloat64,
+ kWord64,
+ kOddballAndBigIntToNumber,
kAny
};
explicit Truncation(TruncationKind kind, IdentifyZeros identify_zeros)
: kind_(kind), identify_zeros_(identify_zeros) {
- DCHECK(kind == TruncationKind::kAny || kind == TruncationKind::kFloat64 ||
+ DCHECK(kind == TruncationKind::kAny ||
+ kind == TruncationKind::kOddballAndBigIntToNumber ||
identify_zeros == kIdentifyZeros);
}
TruncationKind kind() const { return kind_; }
@@ -109,7 +119,8 @@ enum class TypeCheckKind : uint8_t {
kSigned64,
kNumber,
kNumberOrOddball,
- kHeapObject
+ kHeapObject,
+ kBigInt,
};
inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
@@ -128,6 +139,8 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
return os << "NumberOrOddball";
case TypeCheckKind::kHeapObject:
return os << "HeapObject";
+ case TypeCheckKind::kBigInt:
+ return os << "BigInt";
}
UNREACHABLE();
}
@@ -160,6 +173,13 @@ class UseInfo {
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
+ static UseInfo TruncatingWord64() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+ }
+ static UseInfo CheckedBigIntTruncatingWord64(const VectorSlotPair& feedback) {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64(),
+ TypeCheckKind::kBigInt, feedback);
+ }
static UseInfo Word64() {
return UseInfo(MachineRepresentation::kWord64, Truncation::Any());
}
@@ -175,7 +195,7 @@ class UseInfo {
static UseInfo TruncatingFloat64(
IdentifyZeros identify_zeros = kDistinguishZeros) {
return UseInfo(MachineRepresentation::kFloat64,
- Truncation::Float64(identify_zeros));
+ Truncation::OddballAndBigIntToNumber(identify_zeros));
}
static UseInfo AnyTagged() {
return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
@@ -203,6 +223,12 @@ class UseInfo {
return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
TypeCheckKind::kHeapObject, feedback);
}
+
+ static UseInfo CheckedBigIntAsTaggedPointer(const VectorSlotPair& feedback) {
+ return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
+ TypeCheckKind::kBigInt, feedback);
+ }
+
static UseInfo CheckedSignedSmallAsTaggedSigned(
const VectorSlotPair& feedback,
IdentifyZeros identify_zeros = kDistinguishZeros) {
@@ -240,8 +266,6 @@ class UseInfo {
}
static UseInfo CheckedNumberOrOddballAsFloat64(
IdentifyZeros identify_zeros, const VectorSlotPair& feedback) {
- // TODO(tebbi): We should use Float64 truncation here, since this exactly
- // means that we treat Oddballs as Numbers.
return UseInfo(MachineRepresentation::kFloat64,
Truncation::Any(identify_zeros),
TypeCheckKind::kNumberOrOddball, feedback);
@@ -287,7 +311,7 @@ class UseInfo {
// Eagerly folds any representation changes for constants.
class V8_EXPORT_PRIVATE RepresentationChanger final {
public:
- RepresentationChanger(JSGraph* jsgraph, Isolate* isolate);
+ RepresentationChanger(JSGraph* jsgraph, JSHeapBroker* broker);
// Changes representation from {output_type} to {use_rep}. The {truncation}
// parameter is only used for sanity checking - if the changer cannot figure
@@ -317,7 +341,7 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
private:
TypeCache const* cache_;
JSGraph* jsgraph_;
- Isolate* isolate_;
+ JSHeapBroker* broker_;
friend class RepresentationChangerTester; // accesses the below fields.
@@ -371,12 +395,13 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
Node* InsertChangeTaggedSignedToInt32(Node* node);
Node* InsertChangeTaggedToFloat64(Node* node);
Node* InsertChangeUint32ToFloat64(Node* node);
+ Node* InsertChangeCompressedToTagged(Node* node);
Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
Node* InsertTruncateInt64ToInt32(Node* node);
Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason);
JSGraph* jsgraph() const { return jsgraph_; }
- Isolate* isolate() const { return isolate_; }
+ Isolate* isolate() const;
Factory* factory() const { return isolate()->factory(); }
SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index b57162f7f5..25919bb3b3 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "src/base/adapters.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
@@ -26,7 +27,7 @@ namespace compiler {
} while (false)
Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
- size_t node_count_hint)
+ size_t node_count_hint, TickCounter* tick_counter)
: zone_(zone),
graph_(graph),
schedule_(schedule),
@@ -34,12 +35,14 @@ Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
scheduled_nodes_(zone),
schedule_root_nodes_(zone),
schedule_queue_(zone),
- node_data_(zone) {
+ node_data_(zone),
+ tick_counter_(tick_counter) {
node_data_.reserve(node_count_hint);
node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
}
-Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
+Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags,
+ TickCounter* tick_counter) {
Zone* schedule_zone =
(flags & Scheduler::kTempSchedule) ? zone : graph->zone();
@@ -50,7 +53,8 @@ Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
Schedule* schedule =
new (schedule_zone) Schedule(schedule_zone, node_count_hint);
- Scheduler scheduler(zone, graph, schedule, flags, node_count_hint);
+ Scheduler scheduler(zone, graph, schedule, flags, node_count_hint,
+ tick_counter);
scheduler.BuildCFG();
scheduler.ComputeSpecialRPONumbering();
@@ -65,7 +69,6 @@ Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
return schedule;
}
-
Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
SchedulerData def = {schedule_->start(), 0, kUnknown};
return def;
@@ -258,6 +261,7 @@ class CFGBuilder : public ZoneObject {
Queue(scheduler_->graph_->end());
while (!queue_.empty()) { // Breadth-first backwards traversal.
+ scheduler_->tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop();
int max = NodeProperties::PastControlIndex(node);
@@ -283,6 +287,7 @@ class CFGBuilder : public ZoneObject {
component_end_ = schedule_->block(exit);
scheduler_->equivalence_->Run(exit);
while (!queue_.empty()) { // Breadth-first backwards traversal.
+ scheduler_->tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop();
@@ -728,11 +733,10 @@ class SpecialRPONumberer : public ZoneObject {
}
};
- int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
- BasicBlock* child, int unvisited) {
+ int Push(int depth, BasicBlock* child, int unvisited) {
if (child->rpo_number() == unvisited) {
- stack[depth].block = child;
- stack[depth].index = 0;
+ stack_[depth].block = child;
+ stack_[depth].index = 0;
child->set_rpo_number(kBlockOnStack);
return depth + 1;
}
@@ -780,7 +784,7 @@ class SpecialRPONumberer : public ZoneObject {
DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
previous_block_count_ = schedule_->BasicBlockCount();
- int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
+ int stack_depth = Push(0, entry, kBlockUnvisited1);
int num_loops = static_cast<int>(loops_.size());
while (stack_depth > 0) {
@@ -802,7 +806,7 @@ class SpecialRPONumberer : public ZoneObject {
} else {
// Push the successor onto the stack.
DCHECK_EQ(kBlockUnvisited1, succ->rpo_number());
- stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
+ stack_depth = Push(stack_depth, succ, kBlockUnvisited1);
}
} else {
// Finished with all successors; pop the stack and add the block.
@@ -827,7 +831,7 @@ class SpecialRPONumberer : public ZoneObject {
// edges that lead out of loops. Visits each block once, but linking loop
// sections together is linear in the loop size, so overall is
// O(|B| + max(loop_depth) * max(|loop|))
- stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
+ stack_depth = Push(0, entry, kBlockUnvisited2);
while (stack_depth > 0) {
SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
BasicBlock* block = frame->block;
@@ -874,7 +878,7 @@ class SpecialRPONumberer : public ZoneObject {
loop->AddOutgoing(zone_, succ);
} else {
// Push the successor onto the stack.
- stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
+ stack_depth = Push(stack_depth, succ, kBlockUnvisited2);
if (HasLoopNumber(succ)) {
// Push the inner loop onto the loop stack.
DCHECK(GetLoopNumber(succ) < num_loops);
@@ -958,8 +962,9 @@ class SpecialRPONumberer : public ZoneObject {
}
// Computes loop membership from the backedges of the control flow graph.
- void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
- size_t num_loops, ZoneVector<Backedge>* backedges) {
+ void ComputeLoopInfo(
+ ZoneVector<SpecialRPOStackFrame>& queue, // NOLINT(runtime/references)
+ size_t num_loops, ZoneVector<Backedge>* backedges) {
// Extend existing loop membership vectors.
for (LoopInfo& loop : loops_) {
loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
@@ -1234,6 +1239,7 @@ void Scheduler::PrepareUses() {
visited[node->id()] = true;
stack.push(node->input_edges().begin());
while (!stack.empty()) {
+ tick_counter_->DoTick();
Edge edge = *stack.top();
Node* node = edge.to();
if (visited[node->id()]) {
@@ -1262,6 +1268,7 @@ class ScheduleEarlyNodeVisitor {
for (Node* const root : *roots) {
queue_.push(root);
while (!queue_.empty()) {
+ scheduler_->tick_counter_->DoTick();
VisitNode(queue_.front());
queue_.pop();
}
@@ -1388,6 +1395,7 @@ class ScheduleLateNodeVisitor {
queue->push(node);
do {
+ scheduler_->tick_counter_->DoTick();
Node* const node = queue->front();
queue->pop();
VisitNode(node);
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
index bd2f2780dd..3d1fa40025 100644
--- a/deps/v8/src/compiler/scheduler.h
+++ b/deps/v8/src/compiler/scheduler.h
@@ -15,6 +15,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -23,7 +26,6 @@ class ControlEquivalence;
class Graph;
class SpecialRPONumberer;
-
// Computes a schedule from a graph, placing nodes into basic blocks and
// ordering the basic blocks in the special RPO order.
class V8_EXPORT_PRIVATE Scheduler {
@@ -34,7 +36,8 @@ class V8_EXPORT_PRIVATE Scheduler {
// The complete scheduling algorithm. Creates a new schedule and places all
// nodes from the graph into it.
- static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags);
+ static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags,
+ TickCounter* tick_counter);
// Compute the RPO of blocks in an existing schedule.
static BasicBlockVector* ComputeSpecialRPO(Zone* zone, Schedule* schedule);
@@ -78,9 +81,10 @@ class V8_EXPORT_PRIVATE Scheduler {
CFGBuilder* control_flow_builder_; // Builds basic blocks for controls.
SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks.
ControlEquivalence* equivalence_; // Control dependence equivalence.
+ TickCounter* const tick_counter_;
Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
- size_t node_count_hint_);
+ size_t node_count_hint_, TickCounter* tick_counter);
inline SchedulerData DefaultSchedulerData();
inline SchedulerData* GetData(Node* node);
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index ecbd9cc030..5597850b06 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -6,30 +6,495 @@
#include <sstream>
+#include "src/base/optional.h"
+#include "src/compiler/access-info.h"
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/vector-slot-pair.h"
#include "src/handles/handles-inl.h"
+#include "src/ic/call-optimization.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects/code.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/objects/shared-function-info-inl.h"
+#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
+#define CLEAR_ENVIRONMENT_LIST(V) \
+ V(CallRuntimeForPair) \
+ V(Debugger) \
+ V(ResumeGenerator) \
+ V(SuspendGenerator)
+
+#define KILL_ENVIRONMENT_LIST(V) \
+ V(Abort) \
+ V(ReThrow) \
+ V(Throw)
+
+#define CLEAR_ACCUMULATOR_LIST(V) \
+ V(Add) \
+ V(AddSmi) \
+ V(BitwiseAnd) \
+ V(BitwiseAndSmi) \
+ V(BitwiseNot) \
+ V(BitwiseOr) \
+ V(BitwiseOrSmi) \
+ V(BitwiseXor) \
+ V(BitwiseXorSmi) \
+ V(CallRuntime) \
+ V(CloneObject) \
+ V(CreateArrayFromIterable) \
+ V(CreateArrayLiteral) \
+ V(CreateEmptyArrayLiteral) \
+ V(CreateEmptyObjectLiteral) \
+ V(CreateMappedArguments) \
+ V(CreateObjectLiteral) \
+ V(CreateRegExpLiteral) \
+ V(CreateRestParameter) \
+ V(CreateUnmappedArguments) \
+ V(Dec) \
+ V(DeletePropertySloppy) \
+ V(DeletePropertyStrict) \
+ V(Div) \
+ V(DivSmi) \
+ V(Exp) \
+ V(ExpSmi) \
+ V(ForInContinue) \
+ V(ForInEnumerate) \
+ V(ForInNext) \
+ V(ForInStep) \
+ V(Inc) \
+ V(LdaLookupSlot) \
+ V(LdaLookupSlotInsideTypeof) \
+ V(LogicalNot) \
+ V(Mod) \
+ V(ModSmi) \
+ V(Mul) \
+ V(MulSmi) \
+ V(Negate) \
+ V(SetPendingMessage) \
+ V(ShiftLeft) \
+ V(ShiftLeftSmi) \
+ V(ShiftRight) \
+ V(ShiftRightLogical) \
+ V(ShiftRightLogicalSmi) \
+ V(ShiftRightSmi) \
+ V(StaLookupSlot) \
+ V(Sub) \
+ V(SubSmi) \
+ V(TestEqual) \
+ V(TestEqualStrict) \
+ V(TestGreaterThan) \
+ V(TestGreaterThanOrEqual) \
+ V(TestInstanceOf) \
+ V(TestLessThan) \
+ V(TestLessThanOrEqual) \
+ V(TestNull) \
+ V(TestReferenceEqual) \
+ V(TestTypeOf) \
+ V(TestUndefined) \
+ V(TestUndetectable) \
+ V(ToBooleanLogicalNot) \
+ V(ToName) \
+ V(ToNumber) \
+ V(ToNumeric) \
+ V(ToString) \
+ V(TypeOf)
+
+#define UNCONDITIONAL_JUMPS_LIST(V) \
+ V(Jump) \
+ V(JumpConstant) \
+ V(JumpLoop)
+
+#define CONDITIONAL_JUMPS_LIST(V) \
+ V(JumpIfFalse) \
+ V(JumpIfFalseConstant) \
+ V(JumpIfJSReceiver) \
+ V(JumpIfJSReceiverConstant) \
+ V(JumpIfNotNull) \
+ V(JumpIfNotNullConstant) \
+ V(JumpIfNotUndefined) \
+ V(JumpIfNotUndefinedConstant) \
+ V(JumpIfNull) \
+ V(JumpIfNullConstant) \
+ V(JumpIfToBooleanFalse) \
+ V(JumpIfToBooleanFalseConstant) \
+ V(JumpIfToBooleanTrue) \
+ V(JumpIfToBooleanTrueConstant) \
+ V(JumpIfTrue) \
+ V(JumpIfTrueConstant) \
+ V(JumpIfUndefined) \
+ V(JumpIfUndefinedConstant)
+
+#define IGNORED_BYTECODE_LIST(V) \
+ V(CallNoFeedback) \
+ V(IncBlockCounter) \
+ V(LdaNamedPropertyNoFeedback) \
+ V(StackCheck) \
+ V(StaNamedPropertyNoFeedback) \
+ V(ThrowReferenceErrorIfHole) \
+ V(ThrowSuperAlreadyCalledIfNotHole) \
+ V(ThrowSuperNotCalledIfHole)
+
+#define UNREACHABLE_BYTECODE_LIST(V) \
+ V(ExtraWide) \
+ V(Illegal) \
+ V(Wide)
+
+#define SUPPORTED_BYTECODE_LIST(V) \
+ V(CallAnyReceiver) \
+ V(CallJSRuntime) \
+ V(CallProperty) \
+ V(CallProperty0) \
+ V(CallProperty1) \
+ V(CallProperty2) \
+ V(CallUndefinedReceiver) \
+ V(CallUndefinedReceiver0) \
+ V(CallUndefinedReceiver1) \
+ V(CallUndefinedReceiver2) \
+ V(CallWithSpread) \
+ V(Construct) \
+ V(ConstructWithSpread) \
+ V(CreateBlockContext) \
+ V(CreateCatchContext) \
+ V(CreateClosure) \
+ V(CreateEvalContext) \
+ V(CreateFunctionContext) \
+ V(CreateWithContext) \
+ V(GetSuperConstructor) \
+ V(GetTemplateObject) \
+ V(InvokeIntrinsic) \
+ V(LdaConstant) \
+ V(LdaContextSlot) \
+ V(LdaCurrentContextSlot) \
+ V(LdaImmutableContextSlot) \
+ V(LdaImmutableCurrentContextSlot) \
+ V(LdaModuleVariable) \
+ V(LdaFalse) \
+ V(LdaGlobal) \
+ V(LdaGlobalInsideTypeof) \
+ V(LdaKeyedProperty) \
+ V(LdaLookupContextSlot) \
+ V(LdaLookupContextSlotInsideTypeof) \
+ V(LdaLookupGlobalSlot) \
+ V(LdaLookupGlobalSlotInsideTypeof) \
+ V(LdaNamedProperty) \
+ V(LdaNull) \
+ V(Ldar) \
+ V(LdaSmi) \
+ V(LdaTheHole) \
+ V(LdaTrue) \
+ V(LdaUndefined) \
+ V(LdaZero) \
+ V(Mov) \
+ V(PopContext) \
+ V(PushContext) \
+ V(Return) \
+ V(StaContextSlot) \
+ V(StaCurrentContextSlot) \
+ V(StaGlobal) \
+ V(StaInArrayLiteral) \
+ V(StaKeyedProperty) \
+ V(StaModuleVariable) \
+ V(StaNamedOwnProperty) \
+ V(StaNamedProperty) \
+ V(Star) \
+ V(SwitchOnGeneratorState) \
+ V(SwitchOnSmiNoFeedback) \
+ V(TestIn) \
+ CLEAR_ACCUMULATOR_LIST(V) \
+ CLEAR_ENVIRONMENT_LIST(V) \
+ CONDITIONAL_JUMPS_LIST(V) \
+ IGNORED_BYTECODE_LIST(V) \
+ KILL_ENVIRONMENT_LIST(V) \
+ UNCONDITIONAL_JUMPS_LIST(V) \
+ UNREACHABLE_BYTECODE_LIST(V)
+
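// --- Editorial sketch (not part of the diff): the bytecode lists above use
// --- the X-macro idiom. Each *_LIST(V) macro applies V to every bytecode
// --- name, so a single list can generate visitor declarations, dispatch
// --- cases, and so on. The three-entry list below is illustrative only.
#include <iostream>

#define DEMO_BYTECODE_LIST(V) \
  V(Ldar)                     \
  V(Star)                     \
  V(Return)

// One visitor declaration per list entry.
#define DECLARE_VISIT(name) void Visit##name();
DEMO_BYTECODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT

enum class DemoBytecode { kLdar, kStar, kReturn };

// One switch case per list entry, dispatching to the matching visitor.
void Dispatch(DemoBytecode bytecode) {
#define BYTECODE_CASE(name)   \
  case DemoBytecode::k##name: \
    Visit##name();            \
    break;
  switch (bytecode) { DEMO_BYTECODE_LIST(BYTECODE_CASE) }
#undef BYTECODE_CASE
}

void VisitLdar() { std::cout << "Ldar\n"; }
void VisitStar() { std::cout << "Star\n"; }
void VisitReturn() { std::cout << "Return\n"; }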
+template <typename T>
+struct HandleComparator {
+ bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
+ return lhs.address() < rhs.address();
+ }
+};
+
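// --- Editorial sketch (not part of the diff): HandleComparator orders
// --- handles by the address of their handle slot, giving the ZoneSets below
// --- a cheap strict weak ordering that never dereferences the heap object.
// --- The same idea with plain pointers and std::set:
#include <cassert>
#include <cstdint>
#include <set>

struct ByAddress {
  bool operator()(const int* lhs, const int* rhs) const {
    return reinterpret_cast<uintptr_t>(lhs) < reinterpret_cast<uintptr_t>(rhs);
  }
};

int main() {
  static int a = 1, b = 2;
  std::set<const int*, ByAddress> seen;
  seen.insert(&a);
  seen.insert(&a);                  // duplicate address, set keeps one entry
  seen.insert(&b);
  assert(seen.size() == 2);
  return 0;
}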
+struct VirtualContext {
+ unsigned int distance;
+ Handle<Context> context;
+
+ VirtualContext(unsigned int distance_in, Handle<Context> context_in)
+ : distance(distance_in), context(context_in) {
+ CHECK_GT(distance, 0);
+ }
+ bool operator<(const VirtualContext& other) const {
+ return HandleComparator<Context>()(context, other.context) &&
+ distance < other.distance;
+ }
+};
+
+class FunctionBlueprint;
+using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>;
+using VirtualContextsSet = ZoneSet<VirtualContext>;
+using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>;
+using BlueprintsSet = ZoneSet<FunctionBlueprint>;
+
+class Hints {
+ public:
+ explicit Hints(Zone* zone);
+
+ const ConstantsSet& constants() const;
+ const MapsSet& maps() const;
+ const BlueprintsSet& function_blueprints() const;
+ const VirtualContextsSet& virtual_contexts() const;
+
+ void AddConstant(Handle<Object> constant);
+ void AddMap(Handle<Map> map);
+ void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
+ void AddVirtualContext(VirtualContext virtual_context);
+
+ void Add(const Hints& other);
+
+ void Clear();
+ bool IsEmpty() const;
+
+#ifdef ENABLE_SLOW_DCHECKS
+ bool Includes(Hints const& other) const;
+ bool Equals(Hints const& other) const;
+#endif
+
+ private:
+ VirtualContextsSet virtual_contexts_;
+ ConstantsSet constants_;
+ MapsSet maps_;
+ BlueprintsSet function_blueprints_;
+};
+
+using HintsVector = ZoneVector<Hints>;
+
+class FunctionBlueprint {
+ public:
+ FunctionBlueprint(Handle<JSFunction> function, Isolate* isolate, Zone* zone);
+
+ FunctionBlueprint(Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> feedback_vector,
+ const Hints& context_hints);
+
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
+ Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; }
+ const Hints& context_hints() const { return context_hints_; }
+
+ bool operator<(const FunctionBlueprint& other) const {
+ // A feedback vector is never used for more than one SFI, so it can
+ // be used for strict ordering of blueprints.
+ DCHECK_IMPLIES(feedback_vector_.equals(other.feedback_vector_),
+ shared_.equals(other.shared_));
+ return HandleComparator<FeedbackVector>()(feedback_vector_,
+ other.feedback_vector_);
+ }
+
+ private:
+ Handle<SharedFunctionInfo> shared_;
+ Handle<FeedbackVector> feedback_vector_;
+ Hints context_hints_;
+};
+
+class CompilationSubject {
+ public:
+ explicit CompilationSubject(FunctionBlueprint blueprint)
+ : blueprint_(blueprint) {}
+
+ // The zone parameter is to correctly initialize the blueprint,
+ // which contains zone-allocated context information.
+ CompilationSubject(Handle<JSFunction> closure, Isolate* isolate, Zone* zone);
+
+ const FunctionBlueprint& blueprint() const { return blueprint_; }
+ MaybeHandle<JSFunction> closure() const { return closure_; }
+
+ private:
+ FunctionBlueprint blueprint_;
+ MaybeHandle<JSFunction> closure_;
+};
+
+// The SerializerForBackgroundCompilation makes sure that the relevant function
+// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
+// optimizations in the compiler, is copied to the heap broker.
+class SerializerForBackgroundCompilation {
+ public:
+ SerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset);
+ Hints Run(); // NOTE: Returns empty for an already-serialized function.
+
+ class Environment;
+
+ private:
+ SerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ CompilationSubject function, base::Optional<Hints> new_target,
+ const HintsVector& arguments,
+ SerializerForBackgroundCompilationFlags flags);
+
+ bool BailoutOnUninitialized(FeedbackSlot slot);
+
+ void TraverseBytecode();
+
+#define DECLARE_VISIT_BYTECODE(name, ...) \
+ void Visit##name(interpreter::BytecodeArrayIterator* iterator);
+ SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
+#undef DECLARE_VISIT_BYTECODE
+
+ void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
+ const HintsVector& arguments, FeedbackSlot slot,
+ bool with_spread = false);
+ void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator,
+ ConvertReceiverMode receiver_mode,
+ bool with_spread = false);
+ void ProcessApiCall(Handle<SharedFunctionInfo> target,
+ const HintsVector& arguments);
+ void ProcessReceiverMapForApiCall(
+ FunctionTemplateInfoRef& target, // NOLINT(runtime/references)
+ Handle<Map> receiver);
+ void ProcessBuiltinCall(Handle<SharedFunctionInfo> target,
+ const HintsVector& arguments);
+
+ void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
+
+ void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key,
+ FeedbackSlot slot, AccessMode mode);
+ void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator,
+ AccessMode mode);
+ void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name,
+ FeedbackSlot slot, AccessMode mode);
+ void ProcessMapHintsForPromises(Hints const& receiver_hints);
+ void ProcessHintsForPromiseResolve(Hints const& resolution_hints);
+ void ProcessHintsForRegExpTest(Hints const& regexp_hints);
+ PropertyAccessInfo ProcessMapForRegExpTest(MapRef map);
+ void ProcessHintsForFunctionCall(Hints const& target_hints);
+
+ GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot);
+ NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess(
+ const MapHandles& maps, AccessMode mode, NameRef const& name);
+ ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
+ const MapHandles& maps, AccessMode mode,
+ KeyedAccessMode const& keyed_mode);
+ void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode,
+ base::Optional<NameRef> static_name);
+ void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name);
+
+ void ProcessCreateContext();
+ enum ContextProcessingMode {
+ kIgnoreSlot,
+ kSerializeSlot,
+ kSerializeSlotAndAddToAccumulator
+ };
+
+ void ProcessContextAccess(const Hints& context_hints, int slot, int depth,
+ ContextProcessingMode mode);
+ void ProcessImmutableLoad(ContextRef& context, // NOLINT(runtime/references)
+ int slot, ContextProcessingMode mode);
+ void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator);
+ void ProcessLdaLookupContextSlot(
+ interpreter::BytecodeArrayIterator* iterator);
+
+ // Performs extension lookups for [0, depth) like
+ // BytecodeGraphBuilder::CheckContextExtensions().
+ void ProcessCheckContextExtensions(int depth);
+
+ Hints RunChildSerializer(CompilationSubject function,
+ base::Optional<Hints> new_target,
+ const HintsVector& arguments, bool with_spread);
+
+ // When (forward-)branching bytecodes are encountered, e.g. a conditional
+ // jump, we call ContributeToJumpTargetEnvironment to "remember" the current
+ // environment, associated with the jump target offset. When serialization
+ // eventually reaches that offset, we call IncorporateJumpTargetEnvironment to
+ // merge that environment back into whatever is the current environment then.
+ // Note: Since there may be multiple jumps to the same target,
+ // ContributeToJumpTargetEnvironment may actually do a merge as well.
+ void ContributeToJumpTargetEnvironment(int target_offset);
+ void IncorporateJumpTargetEnvironment(int target_offset);
+
+ Handle<BytecodeArray> bytecode_array() const;
+ BytecodeAnalysis const& GetBytecodeAnalysis(bool serialize);
+
+ JSHeapBroker* broker() const { return broker_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Zone* zone() const { return zone_; }
+ Environment* environment() const { return environment_; }
+ SerializerForBackgroundCompilationFlags flags() const { return flags_; }
+ BailoutId osr_offset() const { return osr_offset_; }
+
+ JSHeapBroker* const broker_;
+ CompilationDependencies* const dependencies_;
+ Zone* const zone_;
+ Environment* const environment_;
+ ZoneUnorderedMap<int, Environment*> jump_target_environments_;
+ SerializerForBackgroundCompilationFlags const flags_;
+ BailoutId const osr_offset_;
+};
+
+void RunSerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset) {
+ SerializerForBackgroundCompilation serializer(broker, dependencies, zone,
+ closure, flags, osr_offset);
+ serializer.Run();
+}
+
using BytecodeArrayIterator = interpreter::BytecodeArrayIterator;
+FunctionBlueprint::FunctionBlueprint(Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> feedback_vector,
+ const Hints& context_hints)
+ : shared_(shared),
+ feedback_vector_(feedback_vector),
+ context_hints_(context_hints) {}
+
+FunctionBlueprint::FunctionBlueprint(Handle<JSFunction> function,
+ Isolate* isolate, Zone* zone)
+ : shared_(handle(function->shared(), isolate)),
+ feedback_vector_(handle(function->feedback_vector(), isolate)),
+ context_hints_(zone) {
+ context_hints_.AddConstant(handle(function->context(), isolate));
+}
+
CompilationSubject::CompilationSubject(Handle<JSFunction> closure,
- Isolate* isolate)
- : blueprint_{handle(closure->shared(), isolate),
- handle(closure->feedback_vector(), isolate)},
- closure_(closure) {
+ Isolate* isolate, Zone* zone)
+ : blueprint_(closure, isolate, zone), closure_(closure) {
CHECK(closure->has_feedback_vector());
}
Hints::Hints(Zone* zone)
- : constants_(zone), maps_(zone), function_blueprints_(zone) {}
+ : virtual_contexts_(zone),
+ constants_(zone),
+ maps_(zone),
+ function_blueprints_(zone) {}
+
+#ifdef ENABLE_SLOW_DCHECKS
+namespace {
+template <typename K, typename Compare>
+bool SetIncludes(ZoneSet<K, Compare> const& lhs,
+ ZoneSet<K, Compare> const& rhs) {
+ return std::all_of(rhs.cbegin(), rhs.cend(),
+ [&](K const& x) { return lhs.find(x) != lhs.cend(); });
+}
+} // namespace
+bool Hints::Includes(Hints const& other) const {
+ return SetIncludes(constants(), other.constants()) &&
+ SetIncludes(function_blueprints(), other.function_blueprints()) &&
+ SetIncludes(maps(), other.maps());
+}
+bool Hints::Equals(Hints const& other) const {
+ return this->Includes(other) && other.Includes(*this);
+}
+#endif
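// --- Editorial sketch (not part of the diff): Hints::Includes above is a
// --- per-set containment check. SetIncludes tests that every element of rhs
// --- is also in lhs via std::all_of, and Equals is mutual inclusion. The
// --- same helper written against std::set instead of ZoneSet:
#include <algorithm>
#include <cassert>
#include <set>

template <typename K, typename Compare>
bool SetIncludes(const std::set<K, Compare>& lhs,
                 const std::set<K, Compare>& rhs) {
  return std::all_of(rhs.cbegin(), rhs.cend(),
                     [&](const K& x) { return lhs.find(x) != lhs.cend(); });
}

int main() {
  std::set<int> a{1, 2, 3};
  std::set<int> b{2, 3};
  assert(SetIncludes(a, b));        // every element of b is also in a
  assert(!SetIncludes(b, a));       // but not the other way around
  return 0;
}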
const ConstantsSet& Hints::constants() const { return constants_; }
@@ -39,6 +504,14 @@ const BlueprintsSet& Hints::function_blueprints() const {
return function_blueprints_;
}
+const VirtualContextsSet& Hints::virtual_contexts() const {
+ return virtual_contexts_;
+}
+
+void Hints::AddVirtualContext(VirtualContext virtual_context) {
+ virtual_contexts_.insert(virtual_context);
+}
+
void Hints::AddConstant(Handle<Object> constant) {
constants_.insert(constant);
}
@@ -53,16 +526,29 @@ void Hints::Add(const Hints& other) {
for (auto x : other.constants()) AddConstant(x);
for (auto x : other.maps()) AddMap(x);
for (auto x : other.function_blueprints()) AddFunctionBlueprint(x);
+ for (auto x : other.virtual_contexts()) AddVirtualContext(x);
}
bool Hints::IsEmpty() const {
- return constants().empty() && maps().empty() && function_blueprints().empty();
+ return constants().empty() && maps().empty() &&
+ function_blueprints().empty() && virtual_contexts().empty();
}
std::ostream& operator<<(std::ostream& out,
+ const VirtualContext& virtual_context) {
+ out << "Distance " << virtual_context.distance << " from "
+ << Brief(*virtual_context.context) << std::endl;
+ return out;
+}
+
+std::ostream& operator<<(std::ostream& out, const Hints& hints);
+
+std::ostream& operator<<(std::ostream& out,
const FunctionBlueprint& blueprint) {
- out << Brief(*blueprint.shared) << std::endl;
- out << Brief(*blueprint.feedback_vector) << std::endl;
+ out << Brief(*blueprint.shared()) << std::endl;
+ out << Brief(*blueprint.feedback_vector()) << std::endl;
+ !blueprint.context_hints().IsEmpty() && out << blueprint.context_hints()
+ << "):" << std::endl;
return out;
}
@@ -76,10 +562,14 @@ std::ostream& operator<<(std::ostream& out, const Hints& hints) {
for (FunctionBlueprint const& blueprint : hints.function_blueprints()) {
out << " blueprint " << blueprint << std::endl;
}
+ for (VirtualContext const& virtual_context : hints.virtual_contexts()) {
+ out << " virtual context " << virtual_context << std::endl;
+ }
return out;
}
void Hints::Clear() {
+ virtual_contexts_.clear();
constants_.clear();
maps_.clear();
function_blueprints_.clear();
@@ -92,50 +582,53 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
Environment(Zone* zone, Isolate* isolate, CompilationSubject function,
base::Optional<Hints> new_target, const HintsVector& arguments);
- bool IsDead() const { return environment_hints_.empty(); }
+ bool IsDead() const { return ephemeral_hints_.empty(); }
void Kill() {
DCHECK(!IsDead());
- environment_hints_.clear();
+ ephemeral_hints_.clear();
DCHECK(IsDead());
}
void Revive() {
DCHECK(IsDead());
- environment_hints_.resize(environment_hints_size(), Hints(zone()));
+ ephemeral_hints_.resize(ephemeral_hints_size(), Hints(zone()));
DCHECK(!IsDead());
}
- // When control flow bytecodes are encountered, e.g. a conditional jump,
- // the current environment needs to be stashed together with the target jump
- // address. Later, when this target bytecode is handled, the stashed
- // environment will be merged into the current one.
+ // Merge {other} into {this} environment (leaving {other} unmodified).
void Merge(Environment* other);
FunctionBlueprint function() const { return function_; }
+ Hints const& closure_hints() const { return closure_hints_; }
+ Hints const& current_context_hints() const { return current_context_hints_; }
+ Hints& current_context_hints() { return current_context_hints_; }
+ Hints const& return_value_hints() const { return return_value_hints_; }
+ Hints& return_value_hints() { return return_value_hints_; }
+
Hints& accumulator_hints() {
- CHECK_LT(accumulator_index(), environment_hints_.size());
- return environment_hints_[accumulator_index()];
+ CHECK_LT(accumulator_index(), ephemeral_hints_.size());
+ return ephemeral_hints_[accumulator_index()];
}
+
Hints& register_hints(interpreter::Register reg) {
+ if (reg.is_function_closure()) return closure_hints_;
+ if (reg.is_current_context()) return current_context_hints_;
int local_index = RegisterToLocalIndex(reg);
- CHECK_LT(local_index, environment_hints_.size());
- return environment_hints_[local_index];
+ CHECK_LT(local_index, ephemeral_hints_.size());
+ return ephemeral_hints_[local_index];
}
- Hints& return_value_hints() { return return_value_hints_; }
- // Clears all hints except those for the return value and the closure.
+ // Clears all hints except those for the context, return value, and the
+ // closure.
void ClearEphemeralHints() {
- DCHECK_EQ(environment_hints_.size(), function_closure_index() + 1);
- for (int i = 0; i < function_closure_index(); ++i) {
- environment_hints_[i].Clear();
- }
+ for (auto& hints : ephemeral_hints_) hints.Clear();
}
// Appends the hints for the given register range to {dst} (in order).
void ExportRegisterHints(interpreter::Register first, size_t count,
- HintsVector& dst);
+ HintsVector& dst); // NOLINT(runtime/references)
private:
friend std::ostream& operator<<(std::ostream& out, const Environment& env);
@@ -153,34 +646,39 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
int const parameter_count_;
int const register_count_;
- // environment_hints_ contains hints for the contents of the registers,
+ Hints closure_hints_;
+ Hints current_context_hints_;
+ Hints return_value_hints_;
+
+ // ephemeral_hints_ contains hints for the contents of the registers,
// the accumulator and the parameters. The layout is as follows:
- // [ parameters | registers | accumulator | context | closure ]
+ // [ parameters | registers | accumulator ]
// The first parameter is the receiver.
- HintsVector environment_hints_;
+ HintsVector ephemeral_hints_;
int accumulator_index() const { return parameter_count() + register_count(); }
- int current_context_index() const { return accumulator_index() + 1; }
- int function_closure_index() const { return current_context_index() + 1; }
- int environment_hints_size() const { return function_closure_index() + 1; }
-
- Hints return_value_hints_;
+ int ephemeral_hints_size() const { return accumulator_index() + 1; }
};
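// --- Editorial sketch (not part of the diff): the index arithmetic behind
// --- the [ parameters | registers | accumulator ] layout above. With, say,
// --- parameter_count = 3 and register_count = 5: parameter a1 maps to slot
// --- 1 (the receiver is a0 at slot 0), register r2 maps to slot 3 + 2 = 5,
// --- the accumulator to slot 8, and the vector holds 9 Hints in total.
#include <cassert>

struct EphemeralLayout {
  int parameter_count;
  int register_count;
  int AccumulatorIndex() const { return parameter_count + register_count; }
  int Size() const { return AccumulatorIndex() + 1; }
  int ParameterIndex(int i) const { return i; }
  int RegisterIndex(int r) const { return parameter_count + r; }
};

int main() {
  EphemeralLayout layout{3, 5};
  assert(layout.ParameterIndex(1) == 1);
  assert(layout.RegisterIndex(2) == 5);
  assert(layout.AccumulatorIndex() == 8);
  assert(layout.Size() == 9);
  return 0;
}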
SerializerForBackgroundCompilation::Environment::Environment(
Zone* zone, CompilationSubject function)
: zone_(zone),
function_(function.blueprint()),
- parameter_count_(function_.shared->GetBytecodeArray().parameter_count()),
- register_count_(function_.shared->GetBytecodeArray().register_count()),
- environment_hints_(environment_hints_size(), Hints(zone), zone),
- return_value_hints_(zone) {
+ parameter_count_(
+ function_.shared()->GetBytecodeArray().parameter_count()),
+ register_count_(function_.shared()->GetBytecodeArray().register_count()),
+ closure_hints_(zone),
+ current_context_hints_(zone),
+ return_value_hints_(zone),
+ ephemeral_hints_(ephemeral_hints_size(), Hints(zone), zone) {
Handle<JSFunction> closure;
if (function.closure().ToHandle(&closure)) {
- environment_hints_[function_closure_index()].AddConstant(closure);
+ closure_hints_.AddConstant(closure);
} else {
- environment_hints_[function_closure_index()].AddFunctionBlueprint(
- function.blueprint());
+ closure_hints_.AddFunctionBlueprint(function.blueprint());
}
+
+ // Consume blueprint context hint information.
+ current_context_hints().Add(function.blueprint().context_hints());
}
SerializerForBackgroundCompilation::Environment::Environment(
@@ -191,18 +689,19 @@ SerializerForBackgroundCompilation::Environment::Environment(
// the parameter_count.
size_t param_count = static_cast<size_t>(parameter_count());
for (size_t i = 0; i < std::min(arguments.size(), param_count); ++i) {
- environment_hints_[i] = arguments[i];
+ ephemeral_hints_[i] = arguments[i];
}
// Pad the rest with "undefined".
Hints undefined_hint(zone);
undefined_hint.AddConstant(isolate->factory()->undefined_value());
for (size_t i = arguments.size(); i < param_count; ++i) {
- environment_hints_[i] = undefined_hint;
+ ephemeral_hints_[i] = undefined_hint;
}
interpreter::Register new_target_reg =
- function_.shared->GetBytecodeArray()
+ function_.shared()
+ ->GetBytecodeArray()
.incoming_new_target_or_generator_register();
if (new_target_reg.is_valid()) {
DCHECK(register_hints(new_target_reg).IsEmpty());
@@ -219,16 +718,20 @@ void SerializerForBackgroundCompilation::Environment::Merge(
CHECK_EQ(parameter_count(), other->parameter_count());
CHECK_EQ(register_count(), other->register_count());
+ SLOW_DCHECK(closure_hints_.Equals(other->closure_hints_));
+
if (IsDead()) {
- environment_hints_ = other->environment_hints_;
+ ephemeral_hints_ = other->ephemeral_hints_;
+ SLOW_DCHECK(return_value_hints_.Includes(other->return_value_hints_));
CHECK(!IsDead());
return;
}
- CHECK_EQ(environment_hints_.size(), other->environment_hints_.size());
- for (size_t i = 0; i < environment_hints_.size(); ++i) {
- environment_hints_[i].Add(other->environment_hints_[i]);
+ CHECK_EQ(ephemeral_hints_.size(), other->ephemeral_hints_.size());
+ for (size_t i = 0; i < ephemeral_hints_.size(); ++i) {
+ ephemeral_hints_[i].Add(other->ephemeral_hints_[i]);
}
+
return_value_hints_.Add(other->return_value_hints_);
}
@@ -236,42 +739,39 @@ std::ostream& operator<<(
std::ostream& out,
const SerializerForBackgroundCompilation::Environment& env) {
std::ostringstream output_stream;
+ output_stream << "Function ";
+ env.function_.shared()->Name().Print(output_stream);
- for (size_t i = 0; i << env.parameter_count(); ++i) {
- Hints const& hints = env.environment_hints_[i];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for a" << i << ":\n" << hints;
- }
- }
- for (size_t i = 0; i << env.register_count(); ++i) {
- Hints const& hints = env.environment_hints_[env.parameter_count() + i];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for r" << i << ":\n" << hints;
- }
- }
- {
- Hints const& hints = env.environment_hints_[env.accumulator_index()];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for <accumulator>:\n" << hints;
+ if (env.IsDead()) {
+ output_stream << "dead\n";
+ } else {
+ output_stream << "alive\n";
+ for (int i = 0; i < static_cast<int>(env.ephemeral_hints_.size()); ++i) {
+ Hints const& hints = env.ephemeral_hints_[i];
+ if (!hints.IsEmpty()) {
+ if (i < env.parameter_count()) {
+ output_stream << "Hints for a" << i << ":\n";
+ } else if (i < env.parameter_count() + env.register_count()) {
+ int local_register = i - env.parameter_count();
+ output_stream << "Hints for r" << local_register << ":\n";
+ } else if (i == env.accumulator_index()) {
+ output_stream << "Hints for <accumulator>:\n";
+ } else {
+ UNREACHABLE();
+ }
+ output_stream << hints;
+ }
}
}
- {
- Hints const& hints = env.environment_hints_[env.function_closure_index()];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for <closure>:\n" << hints;
- }
+
+ if (!env.closure_hints().IsEmpty()) {
+ output_stream << "Hints for <closure>:\n" << env.closure_hints();
}
- {
- Hints const& hints = env.environment_hints_[env.current_context_index()];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for <context>:\n" << hints;
- }
+ if (!env.current_context_hints().IsEmpty()) {
+ output_stream << "Hints for <context>:\n" << env.current_context_hints();
}
- {
- Hints const& hints = env.return_value_hints_;
- if (!hints.IsEmpty()) {
- output_stream << "Hints for {return value}:\n" << hints;
- }
+ if (!env.return_value_hints().IsEmpty()) {
+ output_stream << "Hints for {return value}:\n" << env.return_value_hints();
}
out << output_stream.str();
@@ -280,25 +780,26 @@ std::ostream& operator<<(
int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex(
interpreter::Register reg) const {
- // TODO(mslekova): We also want to gather hints for the context.
- if (reg.is_current_context()) return current_context_index();
- if (reg.is_function_closure()) return function_closure_index();
if (reg.is_parameter()) {
return reg.ToParameterIndex(parameter_count());
} else {
+ DCHECK(!reg.is_function_closure());
return parameter_count() + reg.index();
}
}
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags)
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset)
: broker_(broker),
dependencies_(dependencies),
zone_(zone),
- environment_(new (zone) Environment(zone, {closure, broker_->isolate()})),
- stashed_environments_(zone),
- flags_(flags) {
+ environment_(new (zone) Environment(
+ zone, CompilationSubject(closure, broker_->isolate(), zone))),
+ jump_target_environments_(zone),
+ flags_(flags),
+ osr_offset_(osr_offset) {
JSFunctionRef(broker, closure).Serialize();
}
@@ -311,9 +812,9 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
zone_(zone),
environment_(new (zone) Environment(zone, broker_->isolate(), function,
new_target, arguments)),
- stashed_environments_(zone),
- flags_(flags) {
- DCHECK(!(flags_ & SerializerForBackgroundCompilationFlag::kOsr));
+ jump_target_environments_(zone),
+ flags_(flags),
+ osr_offset_(BailoutId::None()) {
TraceScope tracer(
broker_, this,
"SerializerForBackgroundCompilation::SerializerForBackgroundCompilation");
@@ -331,12 +832,12 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized)) {
return false;
}
- if (flags() & SerializerForBackgroundCompilationFlag::kOsr) {
+ if (!osr_offset().IsNone()) {
// Exclude OSR from this optimization because we might end up skipping the
// OSR entry point. TODO(neis): Support OSR?
return false;
}
- FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+ FeedbackNexus nexus(environment()->function().feedback_vector(), slot);
if (!slot.IsInvalid() && nexus.IsUninitialized()) {
FeedbackSource source(nexus);
if (broker()->HasFeedback(source)) {
@@ -354,9 +855,9 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
Hints SerializerForBackgroundCompilation::Run() {
TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run");
- SharedFunctionInfoRef shared(broker(), environment()->function().shared);
- FeedbackVectorRef feedback_vector(broker(),
- environment()->function().feedback_vector);
+ SharedFunctionInfoRef shared(broker(), environment()->function().shared());
+ FeedbackVectorRef feedback_vector(
+ broker(), environment()->function().feedback_vector());
if (shared.IsSerializedForCompilation(feedback_vector)) {
TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo "
<< Brief(*shared.object())
@@ -382,9 +883,10 @@ Hints SerializerForBackgroundCompilation::Run() {
class ExceptionHandlerMatcher {
public:
explicit ExceptionHandlerMatcher(
- BytecodeArrayIterator const& bytecode_iterator)
+ BytecodeArrayIterator const& bytecode_iterator,
+ Handle<BytecodeArray> bytecode_array)
: bytecode_iterator_(bytecode_iterator) {
- HandlerTable table(*bytecode_iterator_.bytecode_array());
+ HandlerTable table(*bytecode_array);
for (int i = 0, n = table.NumberOfRangeEntries(); i < n; ++i) {
handlers_.insert(table.GetRangeHandler(i));
}
@@ -407,30 +909,53 @@ class ExceptionHandlerMatcher {
std::set<int>::const_iterator handlers_iterator_;
};
+Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array()
+ const {
+ return handle(environment()->function().shared()->GetBytecodeArray(),
+ broker()->isolate());
+}
+
+BytecodeAnalysis const& SerializerForBackgroundCompilation::GetBytecodeAnalysis(
+ bool serialize) {
+ return broker()->GetBytecodeAnalysis(
+ bytecode_array(), osr_offset(),
+ flags() &
+ SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness,
+ serialize);
+}
+
void SerializerForBackgroundCompilation::TraverseBytecode() {
- BytecodeArrayRef bytecode_array(
- broker(), handle(environment()->function().shared->GetBytecodeArray(),
- broker()->isolate()));
- BytecodeArrayIterator iterator(bytecode_array.object());
- ExceptionHandlerMatcher handler_matcher(iterator);
+ BytecodeAnalysis const& bytecode_analysis = GetBytecodeAnalysis(true);
+ BytecodeArrayRef(broker(), bytecode_array()).SerializeForCompilation();
+
+ BytecodeArrayIterator iterator(bytecode_array());
+ ExceptionHandlerMatcher handler_matcher(iterator, bytecode_array());
for (; !iterator.done(); iterator.Advance()) {
- MergeAfterJump(&iterator);
+ int const current_offset = iterator.current_offset();
+ IncorporateJumpTargetEnvironment(current_offset);
+
+ TRACE_BROKER(broker(),
+ "Handling bytecode: " << current_offset << " "
+ << iterator.current_bytecode());
+ TRACE_BROKER(broker(), "Current environment: " << *environment());
if (environment()->IsDead()) {
- if (iterator.current_bytecode() ==
- interpreter::Bytecode::kResumeGenerator ||
- handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) {
+ if (handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) {
environment()->Revive();
} else {
continue; // Skip this bytecode since TF won't generate code for it.
}
}
- TRACE_BROKER(broker(),
- "Handling bytecode: " << iterator.current_offset() << " "
- << iterator.current_bytecode());
- TRACE_BROKER(broker(), "Current environment:\n" << *environment());
+ if (bytecode_analysis.IsLoopHeader(current_offset)) {
+ // Graph builder might insert jumps to resume targets in the loop body.
+ LoopInfo const& loop_info =
+ bytecode_analysis.GetLoopInfoFor(current_offset);
+ for (const auto& target : loop_info.resume_jump_targets()) {
+ ContributeToJumpTargetEnvironment(target.target_offset());
+ }
+ }
switch (iterator.current_bytecode()) {
#define DEFINE_BYTECODE_CASE(name) \
@@ -447,21 +972,6 @@ void SerializerForBackgroundCompilation::TraverseBytecode() {
}
}
-void SerializerForBackgroundCompilation::VisitIllegal(
- BytecodeArrayIterator* iterator) {
- UNREACHABLE();
-}
-
-void SerializerForBackgroundCompilation::VisitWide(
- BytecodeArrayIterator* iterator) {
- UNREACHABLE();
-}
-
-void SerializerForBackgroundCompilation::VisitExtraWide(
- BytecodeArrayIterator* iterator) {
- UNREACHABLE();
-}
-
void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
BytecodeArrayIterator* iterator) {
interpreter::Register dst = iterator->GetRegisterOperand(0);
@@ -480,6 +990,20 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
}
}
+void SerializerForBackgroundCompilation::VisitGetTemplateObject(
+ BytecodeArrayIterator* iterator) {
+ ObjectRef description(
+ broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+ FeedbackSlot slot = iterator->GetSlotOperand(1);
+ FeedbackVectorRef feedback_vector(
+ broker(), environment()->function().feedback_vector());
+ SharedFunctionInfoRef shared(broker(), environment()->function().shared());
+ JSArrayRef template_object =
+ shared.GetTemplateObject(description, feedback_vector, slot, true);
+ environment()->accumulator_hints().Clear();
+ environment()->accumulator_hints().AddConstant(template_object.object());
+}
+
void SerializerForBackgroundCompilation::VisitLdaTrue(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
@@ -529,11 +1053,171 @@ void SerializerForBackgroundCompilation::VisitLdaSmi(
Smi::FromInt(iterator->GetImmediateOperand(0)), broker()->isolate()));
}
+void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
+ BytecodeArrayIterator* iterator) {
+ Runtime::FunctionId functionId = iterator->GetIntrinsicIdOperand(0);
+ // For JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve and
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ if (functionId == Runtime::kInlineAsyncFunctionResolve) {
+ interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+ size_t reg_count = iterator->GetRegisterCountOperand(2);
+ CHECK_EQ(reg_count, 3);
+ HintsVector arguments(zone());
+ environment()->ExportRegisterHints(first_reg, reg_count, arguments);
+ Hints const& resolution_hints = arguments[1]; // The resolution object.
+ ProcessHintsForPromiseResolve(resolution_hints);
+ environment()->accumulator_hints().Clear();
+ return;
+ }
+ environment()->ClearEphemeralHints();
+}
+
void SerializerForBackgroundCompilation::VisitLdaConstant(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- handle(iterator->GetConstantForIndexOperand(0), broker()->isolate()));
+ iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+}
+
+void SerializerForBackgroundCompilation::VisitPushContext(
+ BytecodeArrayIterator* iterator) {
+ // Transfer current context hints to the destination register hints.
+ Hints& current_context_hints = environment()->current_context_hints();
+ Hints& saved_context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ saved_context_hints.Clear();
+ saved_context_hints.Add(current_context_hints);
+
+ // New Context is in the accumulator. Put those hints into the current context
+ // register hints.
+ current_context_hints.Clear();
+ current_context_hints.Add(environment()->accumulator_hints());
+}
+
+void SerializerForBackgroundCompilation::VisitPopContext(
+ BytecodeArrayIterator* iterator) {
+ // Replace current context hints with hints given in the argument register.
+ Hints& new_context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ environment()->current_context_hints().Clear();
+ environment()->current_context_hints().Add(new_context_hints);
+}
+
+void SerializerForBackgroundCompilation::ProcessImmutableLoad(
+ ContextRef& context_ref, int slot, ContextProcessingMode mode) {
+ DCHECK(mode == kSerializeSlot || mode == kSerializeSlotAndAddToAccumulator);
+ base::Optional<ObjectRef> slot_value = context_ref.get(slot, true);
+
+ // Also, put the object into the constant hints for the accumulator.
+ if (mode == kSerializeSlotAndAddToAccumulator && slot_value.has_value()) {
+ environment()->accumulator_hints().AddConstant(slot_value.value().object());
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessContextAccess(
+ const Hints& context_hints, int slot, int depth,
+ ContextProcessingMode mode) {
+ // This function is for JSContextSpecialization::ReduceJSLoadContext and
+ // ReduceJSStoreContext. Those reductions attempt to eliminate as many
+ // loads as possible by making use of constant Context objects. In the
+ // case of an immutable load, ReduceJSLoadContext even attempts to load
+ // the value at {slot}, replacing the load with a constant.
+ for (auto x : context_hints.constants()) {
+ if (x->IsContext()) {
+ // Walk this context to the given depth and serialize the slot found.
+ ContextRef context_ref(broker(), x);
+ size_t remaining_depth = depth;
+ context_ref = context_ref.previous(&remaining_depth, true);
+ if (remaining_depth == 0 && mode != kIgnoreSlot) {
+ ProcessImmutableLoad(context_ref, slot, mode);
+ }
+ }
+ }
+ for (auto x : context_hints.virtual_contexts()) {
+ if (x.distance <= static_cast<unsigned int>(depth)) {
+ ContextRef context_ref(broker(), x.context);
+ size_t remaining_depth = depth - x.distance;
+ context_ref = context_ref.previous(&remaining_depth, true);
+ if (remaining_depth == 0 && mode != kIgnoreSlot) {
+ ProcessImmutableLoad(context_ref, slot, mode);
+ }
+ }
+ }
+}
+
+void SerializerForBackgroundCompilation::VisitLdaContextSlot(
+ BytecodeArrayIterator* iterator) {
+ Hints& context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ const int slot = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(0);
+ const int depth = 0;
+ Hints& context_hints = environment()->current_context_hints();
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ Hints& context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth,
+ kSerializeSlotAndAddToAccumulator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(0);
+ const int depth = 0;
+ Hints& context_hints = environment()->current_context_hints();
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth,
+ kSerializeSlotAndAddToAccumulator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaModuleVariable(
+ BytecodeArrayIterator* iterator) {
+ const int depth = iterator->GetUnsignedImmediateOperand(1);
+
+ // TODO(mvstanton): If we have a constant module, should we serialize the
+ // cell as well? Then we could put the value in the accumulator.
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(environment()->current_context_hints(),
+ Context::EXTENSION_INDEX, depth, kSerializeSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitStaModuleVariable(
+ BytecodeArrayIterator* iterator) {
+ const int depth = iterator->GetUnsignedImmediateOperand(1);
+ ProcessContextAccess(environment()->current_context_hints(),
+ Context::EXTENSION_INDEX, depth, kSerializeSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitStaContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ Hints& register_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ ProcessContextAccess(register_hints, slot, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitStaCurrentContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(0);
+ const int depth = 0;
+ Hints& context_hints = environment()->current_context_hints();
+ ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
}
void SerializerForBackgroundCompilation::VisitLdar(
@@ -558,14 +1242,60 @@ void SerializerForBackgroundCompilation::VisitMov(
environment()->register_hints(dst).Add(environment()->register_hints(src));
}
+void SerializerForBackgroundCompilation::VisitCreateFunctionContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateBlockContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateEvalContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateWithContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateCatchContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::ProcessCreateContext() {
+ Hints& accumulator_hints = environment()->accumulator_hints();
+ accumulator_hints.Clear();
+ Hints& current_context_hints = environment()->current_context_hints();
+
+ // For each constant context, we must create a virtual context from
+ // it of distance one.
+ for (auto x : current_context_hints.constants()) {
+ if (x->IsContext()) {
+ Handle<Context> as_context(Handle<Context>::cast(x));
+ accumulator_hints.AddVirtualContext(VirtualContext(1, as_context));
+ }
+ }
+
+ // For each virtual context, we must create a virtual context from
+ // it of distance {existing distance} + 1.
+ for (auto x : current_context_hints.virtual_contexts()) {
+ accumulator_hints.AddVirtualContext(
+ VirtualContext(x.distance + 1, x.context));
+ }
+}
+
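// --- Editorial sketch (not part of the diff): how VirtualContext distances
// --- compose with ProcessContextAccess above. If the current context hints
// --- contain a concrete Context C, one context creation puts
// --- VirtualContext(1, C) into the accumulator (and, after PushContext,
// --- into the current context hints); a second creation yields
// --- VirtualContext(2, C). A later context access at depth 3 can then still
// --- be resolved by walking the remaining 3 - 2 = 1 hops from C.
#include <cassert>

// Hops still to walk from the concrete context, or -1 when the virtual
// context is too far away to help (distance > depth).
int RemainingDepth(unsigned distance, unsigned depth) {
  if (distance > depth) return -1;
  return static_cast<int>(depth - distance);
}

int main() {
  assert(RemainingDepth(1, 3) == 2);
  assert(RemainingDepth(2, 3) == 1);
  assert(RemainingDepth(4, 3) == -1);   // hint cannot resolve this access
  return 0;
}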
void SerializerForBackgroundCompilation::VisitCreateClosure(
BytecodeArrayIterator* iterator) {
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(iterator->GetConstantForIndexOperand(0)),
- broker()->isolate());
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(
+ iterator->GetConstantForIndexOperand(0, broker()->isolate()));
Handle<FeedbackCell> feedback_cell =
- environment()->function().feedback_vector->GetClosureFeedbackCell(
+ environment()->function().feedback_vector()->GetClosureFeedbackCell(
iterator->GetIndexOperand(1));
FeedbackCellRef feedback_cell_ref(broker(), feedback_cell);
Handle<Object> cell_value(feedback_cell->value(), broker()->isolate());
@@ -573,8 +1303,13 @@ void SerializerForBackgroundCompilation::VisitCreateClosure(
environment()->accumulator_hints().Clear();
if (cell_value->IsFeedbackVector()) {
- environment()->accumulator_hints().AddFunctionBlueprint(
- {shared, Handle<FeedbackVector>::cast(cell_value)});
+ // Gather the context hints from the current context register hint
+ // structure.
+ FunctionBlueprint blueprint(shared,
+ Handle<FeedbackVector>::cast(cell_value),
+ environment()->current_context_hints());
+
+ environment()->accumulator_hints().AddFunctionBlueprint(blueprint);
}
}
@@ -685,6 +1420,16 @@ void SerializerForBackgroundCompilation::VisitCallWithSpread(
ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny, true);
}
+void SerializerForBackgroundCompilation::VisitCallJSRuntime(
+ BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+
+ // BytecodeGraphBuilder::VisitCallJSRuntime needs the {runtime_index}
+ // slot in the native context to be serialized.
+ const int runtime_index = iterator->GetNativeContextIndexOperand(0);
+ broker()->native_context().get(runtime_index, true);
+}
+
Hints SerializerForBackgroundCompilation::RunChildSerializer(
CompilationSubject function, base::Optional<Hints> new_target,
const HintsVector& arguments, bool with_spread) {
@@ -700,14 +1445,14 @@ Hints SerializerForBackgroundCompilation::RunChildSerializer(
padded.pop_back(); // Remove the spread element.
// Fill the rest with empty hints.
padded.resize(
- function.blueprint().shared->GetBytecodeArray().parameter_count(),
+ function.blueprint().shared()->GetBytecodeArray().parameter_count(),
Hints(zone()));
return RunChildSerializer(function, new_target, padded, false);
}
SerializerForBackgroundCompilation child_serializer(
broker(), dependencies(), zone(), function, new_target, arguments,
- flags().without(SerializerForBackgroundCompilationFlag::kOsr));
+ flags());
return child_serializer.Run();
}
@@ -734,7 +1479,7 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
// Incorporate feedback into hints.
base::Optional<HeapObjectRef> feedback = GetHeapObjectFeedback(
- broker(), environment()->function().feedback_vector, slot);
+ broker(), environment()->function().feedback_vector(), slot);
if (feedback.has_value() && feedback->map().is_callable()) {
if (new_target.has_value()) {
// Construct; feedback is new_target, which often is also the callee.
@@ -752,15 +1497,37 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
if (!hint->IsJSFunction()) continue;
Handle<JSFunction> function = Handle<JSFunction>::cast(hint);
- if (!function->shared().IsInlineable() || !function->has_feedback_vector())
- continue;
+ JSFunctionRef(broker(), function).Serialize();
+
+ Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate());
+
+ if (shared->IsApiFunction()) {
+ ProcessApiCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ } else if (shared->HasBuiltinId()) {
+ ProcessBuiltinCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ }
+
+ if (!shared->IsInlineable() || !function->has_feedback_vector()) continue;
environment()->accumulator_hints().Add(RunChildSerializer(
- {function, broker()->isolate()}, new_target, arguments, with_spread));
+ CompilationSubject(function, broker()->isolate(), zone()), new_target,
+ arguments, with_spread));
}
for (auto hint : callee.function_blueprints()) {
- if (!hint.shared->IsInlineable()) continue;
+ Handle<SharedFunctionInfo> shared = hint.shared();
+
+ if (shared->IsApiFunction()) {
+ ProcessApiCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ } else if (shared->HasBuiltinId()) {
+ ProcessBuiltinCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ }
+
+ if (!shared->IsInlineable()) continue;
environment()->accumulator_hints().Add(RunChildSerializer(
CompilationSubject(hint), new_target, arguments, with_spread));
}
@@ -788,22 +1555,222 @@ void SerializerForBackgroundCompilation::ProcessCallVarArgs(
ProcessCallOrConstruct(callee, base::nullopt, arguments, slot);
}
-void SerializerForBackgroundCompilation::ProcessJump(
- interpreter::BytecodeArrayIterator* iterator) {
- int jump_target = iterator->GetJumpTargetOffset();
- int current_offset = iterator->current_offset();
- if (current_offset >= jump_target) return;
+void SerializerForBackgroundCompilation::ProcessApiCall(
+ Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
+ FunctionTemplateInfoRef target_template_info(
+ broker(), handle(target->function_data(), broker()->isolate()));
+ if (!target_template_info.has_call_code()) return;
+
+ target_template_info.SerializeCallCode();
+
+ SharedFunctionInfoRef target_ref(broker(), target);
+ target_ref.SerializeFunctionTemplateInfo();
+
+ if (target_template_info.accept_any_receiver() &&
+ target_template_info.is_signature_undefined())
+ return;
- stashed_environments_[jump_target] = new (zone()) Environment(*environment());
+ CHECK_GE(arguments.size(), 1);
+ Hints const& receiver_hints = arguments[0];
+ for (auto hint : receiver_hints.constants()) {
+ if (hint->IsUndefined()) {
+ // The receiver is the global proxy.
+ Handle<JSGlobalProxy> global_proxy =
+ broker()->native_context().global_proxy_object().object();
+ ProcessReceiverMapForApiCall(
+ target_template_info,
+ handle(global_proxy->map(), broker()->isolate()));
+ continue;
+ }
+
+ if (!hint->IsJSReceiver()) continue;
+ Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(hint));
+
+ ProcessReceiverMapForApiCall(target_template_info,
+ handle(receiver->map(), broker()->isolate()));
+ }
+
+ for (auto receiver_map : receiver_hints.maps()) {
+ ProcessReceiverMapForApiCall(target_template_info, receiver_map);
+ }
}
-void SerializerForBackgroundCompilation::MergeAfterJump(
+void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall(
+ FunctionTemplateInfoRef& target, Handle<Map> receiver) {
+ if (receiver->is_access_check_needed()) {
+ return;
+ }
+
+ MapRef receiver_map(broker(), receiver);
+ TRACE_BROKER(broker(), "Serializing holder for target:" << target);
+
+ target.LookupHolderOfExpectedType(receiver_map, true);
+}
+
+void SerializerForBackgroundCompilation::ProcessBuiltinCall(
+ Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
+ DCHECK(target->HasBuiltinId());
+ const int builtin_id = target->builtin_id();
+ const char* name = Builtins::name(builtin_id);
+ TRACE_BROKER(broker(), "Serializing for call to builtin " << name);
+ switch (builtin_id) {
+ case Builtins::kPromisePrototypeCatch: {
+ // For JSCallReducer::ReducePromisePrototypeCatch.
+ CHECK_GE(arguments.size(), 1);
+ ProcessMapHintsForPromises(arguments[0]);
+ break;
+ }
+ case Builtins::kPromisePrototypeFinally: {
+ // For JSCallReducer::ReducePromisePrototypeFinally.
+ CHECK_GE(arguments.size(), 1);
+ ProcessMapHintsForPromises(arguments[0]);
+ break;
+ }
+ case Builtins::kPromisePrototypeThen: {
+ // For JSCallReducer::ReducePromisePrototypeThen.
+ CHECK_GE(arguments.size(), 1);
+ ProcessMapHintsForPromises(arguments[0]);
+ break;
+ }
+ case Builtins::kPromiseResolveTrampoline:
+ // For JSCallReducer::ReducePromiseInternalResolve and
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ if (arguments.size() >= 2) {
+ Hints const& resolution_hints = arguments[1];
+ ProcessHintsForPromiseResolve(resolution_hints);
+ }
+ break;
+ case Builtins::kPromiseInternalResolve:
+ // For JSCallReducer::ReducePromiseInternalResolve and
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ if (arguments.size() >= 3) {
+ Hints const& resolution_hints = arguments[2];
+ ProcessHintsForPromiseResolve(resolution_hints);
+ }
+ break;
+ case Builtins::kRegExpPrototypeTest: {
+ // For JSCallReducer::ReduceRegExpPrototypeTest.
+ if (arguments.size() >= 1) {
+ Hints const& regexp_hints = arguments[0];
+ ProcessHintsForRegExpTest(regexp_hints);
+ }
+ break;
+ }
+ case Builtins::kFunctionPrototypeCall:
+ if (arguments.size() >= 1) {
+ Hints const& target_hints = arguments[0];
+ ProcessHintsForFunctionCall(target_hints);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForPromiseResolve(
+ Hints const& resolution_hints) {
+ auto processMap = [&](Handle<Map> map) {
+ broker()->CreateAccessInfoForLoadingThen(MapRef(broker(), map),
+ dependencies());
+ };
+
+ for (auto hint : resolution_hints.constants()) {
+ if (!hint->IsJSReceiver()) continue;
+ Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(hint));
+ processMap(handle(receiver->map(), broker()->isolate()));
+ }
+ for (auto map_hint : resolution_hints.maps()) {
+ processMap(map_hint);
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessMapHintsForPromises(
+ Hints const& receiver_hints) {
+ // We need to serialize the prototypes on each receiver map.
+ for (auto constant : receiver_hints.constants()) {
+ if (!constant->IsJSPromise()) continue;
+ Handle<Map> map(Handle<HeapObject>::cast(constant)->map(),
+ broker()->isolate());
+ MapRef(broker(), map).SerializePrototype();
+ }
+ for (auto map : receiver_hints.maps()) {
+ if (!map->IsJSPromiseMap()) continue;
+ MapRef(broker(), map).SerializePrototype();
+ }
+}
+
+PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest(
+ MapRef map) {
+ PropertyAccessInfo ai_exec =
+ broker()->CreateAccessInfoForLoadingExec(map, dependencies());
+
+ Handle<JSObject> holder;
+ if (ai_exec.IsDataConstant() && ai_exec.holder().ToHandle(&holder)) {
+ // The property is on the prototype chain.
+ JSObjectRef holder_ref(broker(), holder);
+ holder_ref.GetOwnProperty(ai_exec.field_representation(),
+ ai_exec.field_index(), true);
+ }
+ return ai_exec;
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
+ Hints const& regexp_hints) {
+ for (auto hint : regexp_hints.constants()) {
+ if (!hint->IsJSRegExp()) continue;
+ Handle<JSRegExp> regexp(Handle<JSRegExp>::cast(hint));
+ Handle<Map> regexp_map(regexp->map(), broker()->isolate());
+ PropertyAccessInfo ai_exec =
+ ProcessMapForRegExpTest(MapRef(broker(), regexp_map));
+ Handle<JSObject> holder;
+ if (ai_exec.IsDataConstant() && !ai_exec.holder().ToHandle(&holder)) {
+ // The property is on the object itself.
+ JSObjectRef holder_ref(broker(), regexp);
+ holder_ref.GetOwnProperty(ai_exec.field_representation(),
+ ai_exec.field_index(), true);
+ }
+ }
+
+ for (auto map : regexp_hints.maps()) {
+ if (!map->IsJSRegExpMap()) continue;
+ ProcessMapForRegExpTest(MapRef(broker(), map));
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForFunctionCall(
+ Hints const& target_hints) {
+ for (auto constant : target_hints.constants()) {
+ if (!constant->IsJSFunction()) continue;
+ JSFunctionRef func(broker(), constant);
+ func.Serialize();
+ }
+}
+
+void SerializerForBackgroundCompilation::ContributeToJumpTargetEnvironment(
+ int target_offset) {
+ auto it = jump_target_environments_.find(target_offset);
+ if (it == jump_target_environments_.end()) {
+ jump_target_environments_[target_offset] =
+ new (zone()) Environment(*environment());
+ } else {
+ it->second->Merge(environment());
+ }
+}
+
+void SerializerForBackgroundCompilation::IncorporateJumpTargetEnvironment(
+ int target_offset) {
+ auto it = jump_target_environments_.find(target_offset);
+ if (it != jump_target_environments_.end()) {
+ environment()->Merge(it->second);
+ jump_target_environments_.erase(it);
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessJump(
interpreter::BytecodeArrayIterator* iterator) {
- int current_offset = iterator->current_offset();
- auto stash = stashed_environments_.find(current_offset);
- if (stash != stashed_environments_.end()) {
- environment()->Merge(stash->second);
- stashed_environments_.erase(stash);
+ int jump_target = iterator->GetJumpTargetOffset();
+ if (iterator->current_offset() < jump_target) {
+ ContributeToJumpTargetEnvironment(jump_target);
}
}
@@ -813,10 +1780,25 @@ void SerializerForBackgroundCompilation::VisitReturn(
environment()->ClearEphemeralHints();
}
+void SerializerForBackgroundCompilation::VisitSwitchOnSmiNoFeedback(
+ interpreter::BytecodeArrayIterator* iterator) {
+ interpreter::JumpTableTargetOffsets targets =
+ iterator->GetJumpTableTargetOffsets();
+ for (const auto& target : targets) {
+ ContributeToJumpTargetEnvironment(target.target_offset);
+ }
+}
+
+void SerializerForBackgroundCompilation::VisitSwitchOnGeneratorState(
+ interpreter::BytecodeArrayIterator* iterator) {
+ for (const auto& target : GetBytecodeAnalysis(false).resume_jump_targets()) {
+ ContributeToJumpTargetEnvironment(target.target_offset());
+ }
+}
+
void SerializerForBackgroundCompilation::Environment::ExportRegisterHints(
interpreter::Register first, size_t count, HintsVector& dst) {
- dst.resize(dst.size() + count, Hints(zone()));
- int reg_base = first.index();
+ const int reg_base = first.index();
for (int i = 0; i < static_cast<int>(count); ++i) {
dst.push_back(register_hints(interpreter::Register(reg_base + i)));
}
@@ -856,8 +1838,8 @@ GlobalAccessFeedback const*
SerializerForBackgroundCompilation::ProcessFeedbackForGlobalAccess(
FeedbackSlot slot) {
if (slot.IsInvalid()) return nullptr;
- if (environment()->function().feedback_vector.is_null()) return nullptr;
- FeedbackSource source(environment()->function().feedback_vector, slot);
+ if (environment()->function().feedback_vector().is_null()) return nullptr;
+ FeedbackSource source(environment()->function().feedback_vector(), slot);
if (broker()->HasFeedback(source)) {
return broker()->GetGlobalAccessFeedback(source);
@@ -889,14 +1871,31 @@ void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof(
VisitLdaGlobal(iterator);
}
-void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot(
+void SerializerForBackgroundCompilation::ProcessCheckContextExtensions(
+ int depth) {
+  // For BytecodeGraphBuilder::CheckContextExtensions.
+ Hints& context_hints = environment()->current_context_hints();
+ for (int i = 0; i < depth; i++) {
+ ProcessContextAccess(context_hints, Context::EXTENSION_INDEX, i,
+ kSerializeSlot);
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessLdaLookupGlobalSlot(
BytecodeArrayIterator* iterator) {
+ ProcessCheckContextExtensions(iterator->GetUnsignedImmediateOperand(2));
+  // TODO(neis): BytecodeGraphBuilder may insert a JSLoadGlobal.
VisitLdaGlobal(iterator);
}
+void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot(
+ BytecodeArrayIterator* iterator) {
+ ProcessLdaLookupGlobalSlot(iterator);
+}
+
void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlotInsideTypeof(
BytecodeArrayIterator* iterator) {
- VisitLdaGlobal(iterator);
+ ProcessLdaLookupGlobalSlot(iterator);
}
void SerializerForBackgroundCompilation::VisitStaGlobal(
@@ -905,6 +1904,26 @@ void SerializerForBackgroundCompilation::VisitStaGlobal(
ProcessFeedbackForGlobalAccess(slot);
}
+void SerializerForBackgroundCompilation::ProcessLdaLookupContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot_index = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ ProcessCheckContextExtensions(depth);
+ Hints& context_hints = environment()->current_context_hints();
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot_index, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupContextSlot(
+ BytecodeArrayIterator* iterator) {
+ ProcessLdaLookupContextSlot(iterator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupContextSlotInsideTypeof(
+ BytecodeArrayIterator* iterator) {
+ ProcessLdaLookupContextSlot(iterator);
+}
+
namespace {
template <class MapContainer>
MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
@@ -922,9 +1941,10 @@ MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
ElementAccessFeedback const*
SerializerForBackgroundCompilation::ProcessFeedbackMapsForElementAccess(
- const MapHandles& maps, AccessMode mode) {
+ const MapHandles& maps, AccessMode mode,
+ KeyedAccessMode const& keyed_mode) {
ElementAccessFeedback const* result =
- broker()->ProcessFeedbackMapsForElementAccess(maps);
+ broker()->ProcessFeedbackMapsForElementAccess(maps, keyed_mode);
for (ElementAccessFeedback::MapIterator it = result->all_maps(broker());
!it.done(); it.advance()) {
switch (mode) {
@@ -952,9 +1972,34 @@ SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
ProcessMapForNamedPropertyAccess(map_ref, name);
AccessInfoFactory access_info_factory(broker(), dependencies(),
broker()->zone());
- access_infos.push_back(access_info_factory.ComputePropertyAccessInfo(
+ PropertyAccessInfo info(access_info_factory.ComputePropertyAccessInfo(
map, name.object(), mode));
+ access_infos.push_back(info);
+
+ // TODO(turbofan): We want to take receiver hints into account as well,
+ // not only the feedback maps.
+ // For JSNativeContextSpecialization::InlinePropertySetterCall
+ // and InlinePropertyGetterCall.
+ if (info.IsAccessorConstant() && !info.constant().is_null()) {
+ if (info.constant()->IsJSFunction()) {
+ // For JSCallReducer::ReduceCallApiFunction.
+ Handle<SharedFunctionInfo> sfi(
+ handle(Handle<JSFunction>::cast(info.constant())->shared(),
+ broker()->isolate()));
+ if (sfi->IsApiFunction()) {
+ FunctionTemplateInfoRef fti_ref(
+ broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
+ if (fti_ref.has_call_code()) fti_ref.SerializeCallCode();
+ ProcessReceiverMapForApiCall(fti_ref, map);
+ }
+ } else {
+ FunctionTemplateInfoRef fti_ref(
+ broker(), Handle<FunctionTemplateInfo>::cast(info.constant()));
+ if (fti_ref.has_call_code()) fti_ref.SerializeCallCode();
+ }
+ }
}
+
DCHECK(!access_infos.empty());
return new (broker()->zone()) NamedAccessFeedback(name, access_infos);
}
@@ -962,9 +2007,9 @@ SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess(
FeedbackSlot slot, AccessMode mode, base::Optional<NameRef> static_name) {
if (slot.IsInvalid()) return;
- if (environment()->function().feedback_vector.is_null()) return;
+ if (environment()->function().feedback_vector().is_null()) return;
- FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+ FeedbackNexus nexus(environment()->function().feedback_vector(), slot);
FeedbackSource source(nexus);
if (broker()->HasFeedback(source)) return;
@@ -992,8 +2037,10 @@ void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess(
static_name.has_value() ? static_name : broker()->GetNameFeedback(nexus);
if (name.has_value()) {
processed = ProcessFeedbackMapsForNamedAccess(maps, mode, *name);
- } else if (nexus.GetKeyType() == ELEMENT && nexus.ic_state() != MEGAMORPHIC) {
- processed = ProcessFeedbackMapsForElementAccess(maps, mode);
+ } else if (nexus.GetKeyType() == ELEMENT) {
+ DCHECK_NE(nexus.ic_state(), MEGAMORPHIC);
+ processed = ProcessFeedbackMapsForElementAccess(
+ maps, mode, KeyedAccessMode::FromNexus(nexus));
}
broker()->SetFeedback(source, processed);
}
@@ -1087,8 +2134,8 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
BytecodeArrayIterator* iterator, AccessMode mode) {
Hints const& receiver =
environment()->register_hints(iterator->GetRegisterOperand(0));
- Handle<Name> name(Name::cast(iterator->GetConstantForIndexOperand(1)),
- broker()->isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ iterator->GetConstantForIndexOperand(1, broker()->isolate()));
FeedbackSlot slot = iterator->GetSlotOperand(2);
ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode);
}
@@ -1176,6 +2223,31 @@ UNCONDITIONAL_JUMPS_LIST(DEFINE_UNCONDITIONAL_JUMP)
IGNORED_BYTECODE_LIST(DEFINE_IGNORE)
#undef DEFINE_IGNORE
+#define DEFINE_UNREACHABLE(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ BytecodeArrayIterator* iterator) { \
+ UNREACHABLE(); \
+ }
+UNREACHABLE_BYTECODE_LIST(DEFINE_UNREACHABLE)
+#undef DEFINE_UNREACHABLE
+
+#define DEFINE_KILL(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ BytecodeArrayIterator* iterator) { \
+ environment()->Kill(); \
+ }
+KILL_ENVIRONMENT_LIST(DEFINE_KILL)
+#undef DEFINE_KILL
+
+#undef CLEAR_ENVIRONMENT_LIST
+#undef KILL_ENVIRONMENT_LIST
+#undef CLEAR_ACCUMULATOR_LIST
+#undef UNCONDITIONAL_JUMPS_LIST
+#undef CONDITIONAL_JUMPS_LIST
+#undef IGNORED_BYTECODE_LIST
+#undef UNREACHABLE_BYTECODE_LIST
+#undef SUPPORTED_BYTECODE_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
index 0ee37ef280..881ed61a55 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.h
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.h
@@ -5,346 +5,31 @@
#ifndef V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
#define V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
-#include "src/base/optional.h"
-#include "src/compiler/access-info.h"
-#include "src/utils/utils.h"
#include "src/handles/handles.h"
-#include "src/handles/maybe-handles.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
-namespace interpreter {
-class BytecodeArrayIterator;
-} // namespace interpreter
-
-class BytecodeArray;
-class FeedbackVector;
-class LookupIterator;
-class NativeContext;
-class ScriptContextTable;
-class SharedFunctionInfo;
-class SourcePositionTableIterator;
+class BailoutId;
class Zone;
namespace compiler {
-#define CLEAR_ENVIRONMENT_LIST(V) \
- V(Abort) \
- V(CallRuntime) \
- V(CallRuntimeForPair) \
- V(CreateBlockContext) \
- V(CreateEvalContext) \
- V(CreateFunctionContext) \
- V(Debugger) \
- V(PopContext) \
- V(PushContext) \
- V(ResumeGenerator) \
- V(ReThrow) \
- V(StaContextSlot) \
- V(StaCurrentContextSlot) \
- V(SuspendGenerator) \
- V(SwitchOnGeneratorState) \
- V(Throw)
-
-#define CLEAR_ACCUMULATOR_LIST(V) \
- V(Add) \
- V(AddSmi) \
- V(BitwiseAnd) \
- V(BitwiseAndSmi) \
- V(BitwiseNot) \
- V(BitwiseOr) \
- V(BitwiseOrSmi) \
- V(BitwiseXor) \
- V(BitwiseXorSmi) \
- V(CloneObject) \
- V(CreateArrayFromIterable) \
- V(CreateArrayLiteral) \
- V(CreateEmptyArrayLiteral) \
- V(CreateEmptyObjectLiteral) \
- V(CreateMappedArguments) \
- V(CreateObjectLiteral) \
- V(CreateRestParameter) \
- V(CreateUnmappedArguments) \
- V(Dec) \
- V(DeletePropertySloppy) \
- V(DeletePropertyStrict) \
- V(Div) \
- V(DivSmi) \
- V(Exp) \
- V(ExpSmi) \
- V(ForInContinue) \
- V(ForInEnumerate) \
- V(ForInNext) \
- V(ForInStep) \
- V(GetTemplateObject) \
- V(Inc) \
- V(LdaContextSlot) \
- V(LdaCurrentContextSlot) \
- V(LdaImmutableContextSlot) \
- V(LdaImmutableCurrentContextSlot) \
- V(LogicalNot) \
- V(Mod) \
- V(ModSmi) \
- V(Mul) \
- V(MulSmi) \
- V(Negate) \
- V(SetPendingMessage) \
- V(ShiftLeft) \
- V(ShiftLeftSmi) \
- V(ShiftRight) \
- V(ShiftRightLogical) \
- V(ShiftRightLogicalSmi) \
- V(ShiftRightSmi) \
- V(Sub) \
- V(SubSmi) \
- V(TestEqual) \
- V(TestEqualStrict) \
- V(TestGreaterThan) \
- V(TestGreaterThanOrEqual) \
- V(TestInstanceOf) \
- V(TestLessThan) \
- V(TestLessThanOrEqual) \
- V(TestNull) \
- V(TestReferenceEqual) \
- V(TestTypeOf) \
- V(TestUndefined) \
- V(TestUndetectable) \
- V(ToBooleanLogicalNot) \
- V(ToName) \
- V(ToNumber) \
- V(ToNumeric) \
- V(ToString) \
- V(TypeOf)
-
-#define UNCONDITIONAL_JUMPS_LIST(V) \
- V(Jump) \
- V(JumpConstant) \
- V(JumpLoop)
-
-#define CONDITIONAL_JUMPS_LIST(V) \
- V(JumpIfFalse) \
- V(JumpIfFalseConstant) \
- V(JumpIfJSReceiver) \
- V(JumpIfJSReceiverConstant) \
- V(JumpIfNotNull) \
- V(JumpIfNotNullConstant) \
- V(JumpIfNotUndefined) \
- V(JumpIfNotUndefinedConstant) \
- V(JumpIfNull) \
- V(JumpIfNullConstant) \
- V(JumpIfToBooleanFalse) \
- V(JumpIfToBooleanFalseConstant) \
- V(JumpIfToBooleanTrue) \
- V(JumpIfToBooleanTrueConstant) \
- V(JumpIfTrue) \
- V(JumpIfTrueConstant) \
- V(JumpIfUndefined) \
- V(JumpIfUndefinedConstant)
-
-#define IGNORED_BYTECODE_LIST(V) \
- V(CallNoFeedback) \
- V(LdaNamedPropertyNoFeedback) \
- V(StackCheck) \
- V(StaNamedPropertyNoFeedback) \
- V(ThrowReferenceErrorIfHole) \
- V(ThrowSuperAlreadyCalledIfNotHole) \
- V(ThrowSuperNotCalledIfHole)
-
-#define SUPPORTED_BYTECODE_LIST(V) \
- V(CallAnyReceiver) \
- V(CallProperty) \
- V(CallProperty0) \
- V(CallProperty1) \
- V(CallProperty2) \
- V(CallUndefinedReceiver) \
- V(CallUndefinedReceiver0) \
- V(CallUndefinedReceiver1) \
- V(CallUndefinedReceiver2) \
- V(CallWithSpread) \
- V(Construct) \
- V(ConstructWithSpread) \
- V(CreateClosure) \
- V(ExtraWide) \
- V(GetSuperConstructor) \
- V(Illegal) \
- V(LdaConstant) \
- V(LdaFalse) \
- V(LdaGlobal) \
- V(LdaGlobalInsideTypeof) \
- V(LdaKeyedProperty) \
- V(LdaLookupGlobalSlot) \
- V(LdaLookupGlobalSlotInsideTypeof) \
- V(LdaNamedProperty) \
- V(LdaNull) \
- V(Ldar) \
- V(LdaSmi) \
- V(LdaTheHole) \
- V(LdaTrue) \
- V(LdaUndefined) \
- V(LdaZero) \
- V(Mov) \
- V(Return) \
- V(StaGlobal) \
- V(StaInArrayLiteral) \
- V(StaKeyedProperty) \
- V(StaNamedOwnProperty) \
- V(StaNamedProperty) \
- V(Star) \
- V(TestIn) \
- V(Wide) \
- CLEAR_ENVIRONMENT_LIST(V) \
- CLEAR_ACCUMULATOR_LIST(V) \
- CONDITIONAL_JUMPS_LIST(V) \
- UNCONDITIONAL_JUMPS_LIST(V) \
- IGNORED_BYTECODE_LIST(V)
-
+class CompilationDependencies;
class JSHeapBroker;
-template <typename T>
-struct HandleComparator {
- bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
- return lhs.address() < rhs.address();
- }
-};
-
-struct FunctionBlueprint {
- Handle<SharedFunctionInfo> shared;
- Handle<FeedbackVector> feedback_vector;
-
- bool operator<(const FunctionBlueprint& other) const {
- // A feedback vector is never used for more than one SFI, so it can
- // be used for strict ordering of blueprints.
- DCHECK_IMPLIES(feedback_vector.equals(other.feedback_vector),
- shared.equals(other.shared));
- return HandleComparator<FeedbackVector>()(feedback_vector,
- other.feedback_vector);
- }
-};
-
-class CompilationSubject {
- public:
- explicit CompilationSubject(FunctionBlueprint blueprint)
- : blueprint_(blueprint) {}
- CompilationSubject(Handle<JSFunction> closure, Isolate* isolate);
-
- FunctionBlueprint blueprint() const { return blueprint_; }
- MaybeHandle<JSFunction> closure() const { return closure_; }
-
- private:
- FunctionBlueprint blueprint_;
- MaybeHandle<JSFunction> closure_;
-};
-
-using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>;
-using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>;
-using BlueprintsSet = ZoneSet<FunctionBlueprint>;
-
-class Hints {
- public:
- explicit Hints(Zone* zone);
-
- const ConstantsSet& constants() const;
- const MapsSet& maps() const;
- const BlueprintsSet& function_blueprints() const;
-
- void AddConstant(Handle<Object> constant);
- void AddMap(Handle<Map> map);
- void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
-
- void Add(const Hints& other);
-
- void Clear();
- bool IsEmpty() const;
-
- private:
- ConstantsSet constants_;
- MapsSet maps_;
- BlueprintsSet function_blueprints_;
-};
-using HintsVector = ZoneVector<Hints>;
-
enum class SerializerForBackgroundCompilationFlag : uint8_t {
kBailoutOnUninitialized = 1 << 0,
kCollectSourcePositions = 1 << 1,
- kOsr = 1 << 2,
+ kAnalyzeEnvironmentLiveness = 1 << 2,
};
using SerializerForBackgroundCompilationFlags =
base::Flags<SerializerForBackgroundCompilationFlag>;
-// The SerializerForBackgroundCompilation makes sure that the relevant function
-// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
-// optimizations in the compiler, is copied to the heap broker.
-class SerializerForBackgroundCompilation {
- public:
- SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags);
- Hints Run(); // NOTE: Returns empty for an already-serialized function.
-
- class Environment;
-
- private:
- SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- CompilationSubject function, base::Optional<Hints> new_target,
- const HintsVector& arguments,
- SerializerForBackgroundCompilationFlags flags);
-
- bool BailoutOnUninitialized(FeedbackSlot slot);
-
- void TraverseBytecode();
-
-#define DECLARE_VISIT_BYTECODE(name, ...) \
- void Visit##name(interpreter::BytecodeArrayIterator* iterator);
- SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
-#undef DECLARE_VISIT_BYTECODE
-
- void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
- const HintsVector& arguments, FeedbackSlot slot,
- bool with_spread = false);
- void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator,
- ConvertReceiverMode receiver_mode,
- bool with_spread = false);
-
- void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
- void MergeAfterJump(interpreter::BytecodeArrayIterator* iterator);
-
- void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key,
- FeedbackSlot slot, AccessMode mode);
- void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator,
- AccessMode mode);
- void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name,
- FeedbackSlot slot, AccessMode mode);
-
- GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot);
- NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess(
- const MapHandles& maps, AccessMode mode, NameRef const& name);
- ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
- const MapHandles& maps, AccessMode mode);
- void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode,
- base::Optional<NameRef> static_name);
- void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name);
-
- Hints RunChildSerializer(CompilationSubject function,
- base::Optional<Hints> new_target,
- const HintsVector& arguments, bool with_spread);
-
- JSHeapBroker* broker() const { return broker_; }
- CompilationDependencies* dependencies() const { return dependencies_; }
- Zone* zone() const { return zone_; }
- Environment* environment() const { return environment_; }
- SerializerForBackgroundCompilationFlags flags() const { return flags_; }
-
- JSHeapBroker* const broker_;
- CompilationDependencies* const dependencies_;
- Zone* const zone_;
- Environment* const environment_;
- ZoneUnorderedMap<int, Environment*> stashed_environments_;
- SerializerForBackgroundCompilationFlags const flags_;
-};
+void RunSerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index cab398c160..6deba2b002 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -16,6 +16,7 @@ namespace internal {
namespace compiler {
namespace {
+static const int kNumLanes64 = 2;
static const int kNumLanes32 = 4;
static const int kNumLanes16 = 8;
static const int kNumLanes8 = 16;
@@ -76,6 +77,8 @@ void SimdScalarLowering::LowerGraph() {
}
}
+#define FOREACH_INT64X2_OPCODE(V) V(I64x2Splat)
+
#define FOREACH_INT32X4_OPCODE(V) \
V(I32x4Splat) \
V(I32x4ExtractLane) \
@@ -119,6 +122,8 @@ void SimdScalarLowering::LowerGraph() {
V(S1x16AnyTrue) \
V(S1x16AllTrue)
+#define FOREACH_FLOAT64X2_OPCODE(V) V(F64x2Splat)
+
#define FOREACH_FLOAT32X4_OPCODE(V) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
@@ -208,8 +213,12 @@ void SimdScalarLowering::LowerGraph() {
MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
switch (simdType) {
+ case SimdType::kFloat64x2:
+ return MachineType::Float64();
case SimdType::kFloat32x4:
return MachineType::Float32();
+ case SimdType::kInt64x2:
+ return MachineType::Int64();
case SimdType::kInt32x4:
return MachineType::Int32();
case SimdType::kInt16x8:
@@ -223,6 +232,14 @@ MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
#define CASE_STMT(name) case IrOpcode::k##name:
+ FOREACH_FLOAT64X2_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kFloat64x2;
+ break;
+ }
+ FOREACH_INT64X2_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt64x2;
+ break;
+ }
FOREACH_INT32X4_OPCODE(CASE_STMT)
case IrOpcode::kReturn:
case IrOpcode::kParameter:
@@ -326,7 +343,9 @@ static int GetReturnCountAfterLoweringSimd128(
int SimdScalarLowering::NumLanes(SimdType type) {
int num_lanes = 0;
- if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) {
+ if (type == SimdType::kFloat64x2 || type == SimdType::kInt64x2) {
+ num_lanes = kNumLanes64;
+ } else if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) {
num_lanes = kNumLanes32;
} else if (type == SimdType::kInt16x8) {
num_lanes = kNumLanes16;
@@ -1198,7 +1217,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
F32X4_UNOP_CASE(Abs)
F32X4_UNOP_CASE(Neg)
-#undef F32x4_UNOP_CASE
+#undef F32X4_UNOP_CASE
case IrOpcode::kF32x4RecipApprox:
case IrOpcode::kF32x4RecipSqrtApprox: {
DCHECK_EQ(1, node->InputCount());
@@ -1223,8 +1242,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32());
break;
}
- case IrOpcode::kI32x4Splat:
+ case IrOpcode::kF64x2Splat:
case IrOpcode::kF32x4Splat:
+ case IrOpcode::kI64x2Splat:
+ case IrOpcode::kI32x4Splat:
case IrOpcode::kI16x8Splat:
case IrOpcode::kI8x16Splat: {
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
@@ -1347,7 +1368,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
case IrOpcode::kS8x16Shuffle: {
DCHECK_EQ(2, node->InputCount());
- const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
+ const uint8_t* shuffle = S8x16ShuffleOf(node->op());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
Node** rep_node = zone()->NewArray<Node*>(16);
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 01ea195bdc..76723fcc77 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -32,7 +32,14 @@ class SimdScalarLowering {
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
- enum class SimdType : uint8_t { kFloat32x4, kInt32x4, kInt16x8, kInt8x16 };
+ enum class SimdType : uint8_t {
+ kFloat64x2,
+ kFloat32x4,
+ kInt64x2,
+ kInt32x4,
+ kInt16x8,
+ kInt8x16
+ };
#if defined(V8_TARGET_BIG_ENDIAN)
static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8,
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 8bc0e7af7b..b028a76bb0 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -22,8 +23,8 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/numbers/conversions-inl.h"
-#include "src/utils/address-map.h"
#include "src/objects/objects.h"
+#include "src/utils/address-map.h"
namespace v8 {
namespace internal {
@@ -279,7 +280,8 @@ class RepresentationSelector {
RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
RepresentationChanger* changer,
SourcePositionTable* source_positions,
- NodeOriginTable* node_origins)
+ NodeOriginTable* node_origins,
+ TickCounter* tick_counter)
: jsgraph_(jsgraph),
zone_(zone),
count_(jsgraph->graph()->NodeCount()),
@@ -296,7 +298,8 @@ class RepresentationSelector {
source_positions_(source_positions),
node_origins_(node_origins),
type_cache_(TypeCache::Get()),
- op_typer_(broker, graph_zone()) {
+ op_typer_(broker, graph_zone()),
+ tick_counter_(tick_counter) {
}
// Forward propagation of types from type feedback.
@@ -444,6 +447,7 @@ class RepresentationSelector {
break; \
}
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(Name) \
@@ -747,21 +751,32 @@ class RepresentationSelector {
!GetUpperBound(node->InputAt(1)).Maybe(type);
}
+ void ChangeToDeadValue(Node* node, Node* effect, Node* control) {
+ DCHECK(TypeOf(node).IsNone());
+ // If the node is unreachable, insert an Unreachable node and mark the
+ // value dead.
+ // TODO(jarin,tebbi) Find a way to unify/merge this insertion with
+ // InsertUnreachableIfNecessary.
+ Node* unreachable = effect =
+ graph()->NewNode(jsgraph_->common()->Unreachable(), effect, control);
+ const Operator* dead_value =
+ jsgraph_->common()->DeadValue(GetInfo(node)->representation());
+ node->ReplaceInput(0, unreachable);
+ node->TrimInputCount(dead_value->ValueInputCount());
+ ReplaceEffectControlUses(node, effect, control);
+ NodeProperties::ChangeOp(node, dead_value);
+ }
+
void ChangeToPureOp(Node* node, const Operator* new_op) {
DCHECK(new_op->HasProperty(Operator::kPure));
+ DCHECK_EQ(new_op->ValueInputCount(), node->op()->ValueInputCount());
if (node->op()->EffectInputCount() > 0) {
DCHECK_LT(0, node->op()->ControlInputCount());
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
if (TypeOf(node).IsNone()) {
- // If the node is unreachable, insert an Unreachable node and mark the
- // value dead.
- // TODO(jarin,tebbi) Find a way to unify/merge this insertion with
- // InsertUnreachableIfNecessary.
- Node* unreachable = effect = graph()->NewNode(
- jsgraph_->common()->Unreachable(), effect, control);
- new_op = jsgraph_->common()->DeadValue(GetInfo(node)->representation());
- node->ReplaceInput(0, unreachable);
+ ChangeToDeadValue(node, effect, control);
+ return;
}
// Rewire the effect and control chains.
node->TrimInputCount(new_op->ValueInputCount());
@@ -772,6 +787,30 @@ class RepresentationSelector {
NodeProperties::ChangeOp(node, new_op);
}
+ void ChangeUnaryToPureBinaryOp(Node* node, const Operator* new_op,
+ int new_input_index, Node* new_input) {
+ DCHECK(new_op->HasProperty(Operator::kPure));
+ DCHECK_EQ(new_op->ValueInputCount(), 2);
+ DCHECK_EQ(node->op()->ValueInputCount(), 1);
+ DCHECK_LE(0, new_input_index);
+ DCHECK_LE(new_input_index, 1);
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_LT(0, node->op()->ControlInputCount());
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ if (TypeOf(node).IsNone()) {
+ ChangeToDeadValue(node, effect, control);
+ return;
+ }
+ node->TrimInputCount(node->op()->ValueInputCount());
+ ReplaceEffectControlUses(node, effect, control);
+ } else {
+ DCHECK_EQ(0, node->op()->ControlInputCount());
+ }
+ node->InsertInput(jsgraph_->zone(), new_input_index, new_input);
+ NodeProperties::ChangeOp(node, new_op);
+ }
+
// Converts input {index} of {node} according to given UseInfo {use},
// assuming the type of the input is {input_type}. If {input_type} is null,
// it takes the input from the input node {TypeOf(node->InputAt(index))}.
@@ -804,6 +843,10 @@ class RepresentationSelector {
}
void ProcessInput(Node* node, int index, UseInfo use) {
+ DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone,
+ !node->op()->HasProperty(Operator::kNoDeopt) &&
+ node->op()->EffectInputCount() > 0);
+
switch (phase_) {
case PROPAGATE:
EnqueueInput(node, index, use);
@@ -958,7 +1001,8 @@ class RepresentationSelector {
return MachineRepresentation::kWord32;
} else if (type.Is(Type::Boolean())) {
return MachineRepresentation::kBit;
- } else if (type.Is(Type::NumberOrOddball()) && use.IsUsedAsFloat64()) {
+ } else if (type.Is(Type::NumberOrOddball()) &&
+ use.TruncatesOddballAndBigIntToNumber()) {
return MachineRepresentation::kFloat64;
} else if (type.Is(Type::Union(Type::SignedSmall(), Type::NaN(), zone()))) {
// TODO(turbofan): For Phis that return either NaN or some Smi, it's
@@ -968,6 +1012,8 @@ class RepresentationSelector {
return MachineRepresentation::kTagged;
} else if (type.Is(Type::Number())) {
return MachineRepresentation::kFloat64;
+ } else if (type.Is(Type::BigInt()) && use.IsUsedAsWord64()) {
+ return MachineRepresentation::kWord64;
} else if (type.Is(Type::ExternalPointer())) {
return MachineType::PointerRepresentation();
}
@@ -1109,8 +1155,11 @@ class RepresentationSelector {
if (IsAnyCompressed(rep)) {
return MachineType::AnyCompressed();
}
- // Word64 representation is only valid for safe integer values.
if (rep == MachineRepresentation::kWord64) {
+ if (type.Is(Type::BigInt())) {
+ return MachineType::AnyTagged();
+ }
+
DCHECK(type.Is(TypeCache::Get()->kSafeInteger));
return MachineType(rep, MachineSemantic::kInt64);
}
@@ -1126,7 +1175,17 @@ class RepresentationSelector {
void VisitStateValues(Node* node) {
if (propagate()) {
for (int i = 0; i < node->InputCount(); i++) {
- EnqueueInput(node, i, UseInfo::Any());
+          // When lowering 64-bit BigInts to Word64 representation, we have to
+          // make sure they are rematerialized before deoptimization. By
+          // propagating an AnyTagged use, the RepresentationChanger is going to
+ // insert the necessary conversions.
+ // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
+ // truncated BigInts.
+ if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) {
+ EnqueueInput(node, i, UseInfo::AnyTagged());
+ } else {
+ EnqueueInput(node, i, UseInfo::Any());
+ }
}
} else if (lower()) {
Zone* zone = jsgraph_->zone();
@@ -1135,6 +1194,12 @@ class RepresentationSelector {
ZoneVector<MachineType>(node->InputCount(), zone);
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
+ // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
+ // truncated BigInts.
+ if (TypeOf(input).Is(Type::BigInt())) {
+ ProcessInput(node, i, UseInfo::AnyTagged());
+ }
+
(*types)[i] =
DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
}
@@ -1621,6 +1686,8 @@ class RepresentationSelector {
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
+ tick_counter_->DoTick();
+
// Unconditionally eliminate unused pure nodes (only relevant if there's
// a pure operation in between two effectful ones, where the last one
// is unused).
@@ -1715,13 +1782,15 @@ class RepresentationSelector {
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric: {
+ DCHECK(NodeProperties::GetType(node).Is(Type::Union(
+ Type::BigInt(), Type::NumberOrOddball(), graph()->zone())));
VisitInputs(node);
// TODO(bmeurer): Optimize somewhat based on input type?
if (truncation.IsUsedAsWord32()) {
SetOutput(node, MachineRepresentation::kWord32);
if (lower())
lowering->DoJSToNumberOrNumericTruncatesToWord32(node, this);
- } else if (truncation.IsUsedAsFloat64()) {
+ } else if (truncation.TruncatesOddballAndBigIntToNumber()) {
SetOutput(node, MachineRepresentation::kFloat64);
if (lower())
lowering->DoJSToNumberOrNumericTruncatesToFloat64(node, this);
@@ -2461,6 +2530,20 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kCheckBigInt: {
+ if (InputIs(node, Type::BigInt())) {
+ VisitNoop(node, truncation);
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ }
+ return;
+ }
+ case IrOpcode::kBigIntAsUintN: {
+ ProcessInput(node, 0, UseInfo::TruncatingWord64());
+ SetOutput(node, MachineRepresentation::kWord64, Type::BigInt());
+ return;
+ }
case IrOpcode::kNumberAcos:
case IrOpcode::kNumberAcosh:
case IrOpcode::kNumberAsin:
@@ -2621,6 +2704,43 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kSpeculativeBigIntAdd: {
+ if (truncation.IsUsedAsWord64()) {
+ VisitBinop(node,
+ UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}),
+ MachineRepresentation::kWord64);
+ if (lower()) {
+ ChangeToPureOp(node, lowering->machine()->Int64Add());
+ }
+ } else {
+ VisitBinop(node,
+ UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) {
+ NodeProperties::ChangeOp(node, lowering->simplified()->BigIntAdd());
+ }
+ }
+ return;
+ }
+ case IrOpcode::kSpeculativeBigIntNegate: {
+ if (truncation.IsUsedAsWord64()) {
+ VisitUnop(node,
+ UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}),
+ MachineRepresentation::kWord64);
+ if (lower()) {
+ ChangeUnaryToPureBinaryOp(node, lowering->machine()->Int64Sub(), 0,
+ jsgraph_->Int64Constant(0));
+ }
+ } else {
+ VisitUnop(node,
+ UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) {
+ ChangeToPureOp(node, lowering->simplified()->BigIntNegate());
+ }
+ }
+ return;
+ }
case IrOpcode::kStringConcat: {
// TODO(turbofan): We currently depend on having this first length input
// to make sure that the overflow check is properly scheduled before the
@@ -2657,6 +2777,10 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kStringFromCodePointAt: {
+ return VisitBinop(node, UseInfo::AnyTagged(), UseInfo::Word(),
+ MachineRepresentation::kTaggedPointer);
+ }
case IrOpcode::kStringIndexOf: {
ProcessInput(node, 0, UseInfo::AnyTagged());
ProcessInput(node, 1, UseInfo::AnyTagged());
@@ -2983,7 +3107,7 @@ class RepresentationSelector {
simplified()->PlainPrimitiveToWord32());
}
}
- } else if (truncation.IsUsedAsFloat64()) {
+ } else if (truncation.TruncatesOddballAndBigIntToNumber()) {
if (InputIs(node, Type::NumberOrOddball())) {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
@@ -3236,7 +3360,7 @@ class RepresentationSelector {
// identifies NaN and undefined, we can just pass along
// the {truncation} and completely wipe the {node}.
if (truncation.IsUnused()) return VisitUnused(node);
- if (truncation.IsUsedAsFloat64()) {
+ if (truncation.TruncatesOddballAndBigIntToNumber()) {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -3263,7 +3387,7 @@ class RepresentationSelector {
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else if (InputIs(node, Type::NumberOrOddball()) &&
- truncation.IsUsedAsFloat64()) {
+ truncation.TruncatesOddballAndBigIntToNumber()) {
// Propagate the Float64 truncation.
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
@@ -3431,6 +3555,9 @@ class RepresentationSelector {
return SetOutput(node, MachineRepresentation::kNone);
case IrOpcode::kStaticAssert:
return VisitUnop(node, UseInfo::Any(), MachineRepresentation::kTagged);
+ case IrOpcode::kAssertType:
+ return VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
default:
FATAL(
"Representation inference: unsupported opcode %i (%s), node #%i\n.",
@@ -3534,6 +3661,7 @@ class RepresentationSelector {
NodeOriginTable* node_origins_;
TypeCache const* type_cache_;
OperationTyper op_typer_; // helper for the feedback typer
+ TickCounter* const tick_counter_;
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() < count_);
@@ -3547,19 +3675,22 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
Zone* zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level)
+ PoisoningMitigationLevel poisoning_level,
+ TickCounter* tick_counter)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
type_cache_(TypeCache::Get()),
source_positions_(source_positions),
node_origins_(node_origins),
- poisoning_level_(poisoning_level) {}
+ poisoning_level_(poisoning_level),
+ tick_counter_(tick_counter) {}
void SimplifiedLowering::LowerAllNodes() {
- RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
+ RepresentationChanger changer(jsgraph(), broker_);
RepresentationSelector selector(jsgraph(), broker_, zone_, &changer,
- source_positions_, node_origins_);
+ source_positions_, node_origins_,
+ tick_counter_);
selector.Run(this);
}
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index e434af9d4f..414e3588d7 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -12,6 +12,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -26,7 +29,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level);
+ PoisoningMitigationLevel poisoning_level,
+ TickCounter* tick_counter);
~SimplifiedLowering() = default;
void LowerAllNodes();
@@ -67,6 +71,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
PoisoningMitigationLevel poisoning_level_;
+ TickCounter* const tick_counter_;
+
Node* Float64Round(Node* const node);
Node* Float64Sign(Node* const node);
Node* Int32Abs(Node* const node);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index ed3cfa8617..4f83635422 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -492,6 +492,18 @@ Handle<Map> FastMapParameterOf(const Operator* op) {
return Handle<Map>::null();
}
+std::ostream& operator<<(std::ostream& os, BigIntOperationHint hint) {
+ switch (hint) {
+ case BigIntOperationHint::kBigInt:
+ return os << "BigInt";
+ }
+ UNREACHABLE();
+}
+
+size_t hash_value(BigIntOperationHint hint) {
+ return static_cast<uint8_t>(hint);
+}
+
std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
@@ -585,12 +597,6 @@ Type AllocateTypeOf(const Operator* op) {
return AllocateParametersOf(op).type();
}
-UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kStringFromSingleCodePoint ||
- op->opcode() == IrOpcode::kStringCodePointAt);
- return OpParameter<UnicodeEncoding>(op);
-}
-
AbortReason AbortReasonOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kRuntimeAbort, op->opcode());
return static_cast<AbortReason>(OpParameter<int>(op));
@@ -702,9 +708,11 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(BigIntNegate, Operator::kNoProperties, 1, 0) \
V(StringConcat, Operator::kNoProperties, 3, 0) \
V(StringToNumber, Operator::kNoProperties, 1, 0) \
V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
V(StringLength, Operator::kNoProperties, 1, 0) \
V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
@@ -713,6 +721,7 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeCompressedSignedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
@@ -723,6 +732,7 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(ChangeCompressedToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToCompressedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt31ToCompressedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
@@ -730,6 +740,8 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
+ V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
@@ -769,9 +781,12 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NewConsString, Operator::kNoProperties, 3, 0) \
V(PoisonIndex, Operator::kNoProperties, 1, 0)
-#define EFFECT_DEPENDENT_OP_LIST(V) \
- V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
- V(StringSubstring, Operator::kNoProperties, 3, 1) \
+#define EFFECT_DEPENDENT_OP_LIST(V) \
+ V(BigIntAdd, Operator::kNoProperties, 2, 1) \
+ V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(StringCodePointAt, Operator::kNoProperties, 2, 1) \
+ V(StringFromCodePointAt, Operator::kNoProperties, 2, 1) \
+ V(StringSubstring, Operator::kNoProperties, 3, 1) \
V(DateNow, Operator::kNoProperties, 0, 1)
#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
@@ -801,6 +816,8 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(CheckNumber, 1, 1) \
V(CheckSmi, 1, 1) \
V(CheckString, 1, 1) \
+ V(CheckBigInt, 1, 1) \
+ V(CheckedInt32ToCompressedSigned, 1, 1) \
V(CheckedInt32ToTaggedSigned, 1, 1) \
V(CheckedInt64ToInt32, 1, 1) \
V(CheckedInt64ToTaggedSigned, 1, 1) \
@@ -895,32 +912,6 @@ struct SimplifiedOperatorGlobalCache final {
DEOPTIMIZE_REASON_LIST(CHECK_IF)
#undef CHECK_IF
- template <UnicodeEncoding kEncoding>
- struct StringCodePointAtOperator final : public Operator1<UnicodeEncoding> {
- StringCodePointAtOperator()
- : Operator1<UnicodeEncoding>(IrOpcode::kStringCodePointAt,
- Operator::kFoldable | Operator::kNoThrow,
- "StringCodePointAt", 2, 1, 1, 1, 1, 0,
- kEncoding) {}
- };
- StringCodePointAtOperator<UnicodeEncoding::UTF16>
- kStringCodePointAtOperatorUTF16;
- StringCodePointAtOperator<UnicodeEncoding::UTF32>
- kStringCodePointAtOperatorUTF32;
-
- template <UnicodeEncoding kEncoding>
- struct StringFromSingleCodePointOperator final
- : public Operator1<UnicodeEncoding> {
- StringFromSingleCodePointOperator()
- : Operator1<UnicodeEncoding>(
- IrOpcode::kStringFromSingleCodePoint, Operator::kPure,
- "StringFromSingleCodePoint", 1, 0, 0, 1, 0, 0, kEncoding) {}
- };
- StringFromSingleCodePointOperator<UnicodeEncoding::UTF16>
- kStringFromSingleCodePointOperatorUTF16;
- StringFromSingleCodePointOperator<UnicodeEncoding::UTF32>
- kStringFromSingleCodePointOperatorUTF32;
-
struct FindOrderedHashMapEntryOperator final : public Operator {
FindOrderedHashMapEntryOperator()
: Operator(IrOpcode::kFindOrderedHashMapEntry, Operator::kEliminatable,
@@ -1236,6 +1227,20 @@ const Operator* SimplifiedOperatorBuilder::RuntimeAbort(AbortReason reason) {
static_cast<int>(reason)); // parameter
}
+const Operator* SimplifiedOperatorBuilder::BigIntAsUintN(int bits) {
+ CHECK(0 <= bits && bits <= 64);
+
+ return new (zone()) Operator1<int>(IrOpcode::kBigIntAsUintN, Operator::kPure,
+ "BigIntAsUintN", 1, 0, 0, 1, 0, 0, bits);
+}
+
+const Operator* SimplifiedOperatorBuilder::AssertType(Type type) {
+ DCHECK(type.IsRange());
+ return new (zone()) Operator1<Type>(IrOpcode::kAssertType,
+ Operator::kNoThrow | Operator::kNoDeopt,
+ "AssertType", 1, 0, 0, 1, 0, 0, type);
+}
+
const Operator* SimplifiedOperatorBuilder::CheckIf(
DeoptimizeReason reason, const VectorSlotPair& feedback) {
if (!feedback.IsValid()) {
@@ -1433,6 +1438,21 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
CheckFloat64HoleParameters(mode, feedback));
}
+const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAdd(
+ BigIntOperationHint hint) {
+ return new (zone()) Operator1<BigIntOperationHint>(
+ IrOpcode::kSpeculativeBigIntAdd, Operator::kFoldable | Operator::kNoThrow,
+ "SpeculativeBigIntAdd", 2, 1, 1, 1, 1, 0, hint);
+}
+
+const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate(
+ BigIntOperationHint hint) {
+ return new (zone()) Operator1<BigIntOperationHint>(
+ IrOpcode::kSpeculativeBigIntNegate,
+ Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntNegate", 1, 1,
+ 1, 1, 1, 0, hint);
+}
+
const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
NumberOperationHint hint, const VectorSlotPair& feedback) {
if (!feedback.IsValid()) {
@@ -1655,28 +1675,6 @@ const Operator* SimplifiedOperatorBuilder::AllocateRaw(
AllocateParameters(type, allocation, allow_large_objects));
}
-const Operator* SimplifiedOperatorBuilder::StringCodePointAt(
- UnicodeEncoding encoding) {
- switch (encoding) {
- case UnicodeEncoding::UTF16:
- return &cache_.kStringCodePointAtOperatorUTF16;
- case UnicodeEncoding::UTF32:
- return &cache_.kStringCodePointAtOperatorUTF32;
- }
- UNREACHABLE();
-}
-
-const Operator* SimplifiedOperatorBuilder::StringFromSingleCodePoint(
- UnicodeEncoding encoding) {
- switch (encoding) {
- case UnicodeEncoding::UTF16:
- return &cache_.kStringFromSingleCodePointOperatorUTF16;
- case UnicodeEncoding::UTF32:
- return &cache_.kStringFromSingleCodePointOperatorUTF32;
- }
- UNREACHABLE();
-}
-
#define SPECULATIVE_NUMBER_BINOP(Name) \
const Operator* SimplifiedOperatorBuilder::Name(NumberOperationHint hint) { \
switch (hint) { \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index d93544c5cd..bdac796adf 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -475,10 +475,15 @@ enum class NumberOperationHint : uint8_t {
kNumberOrOddball, // Inputs were Number or Oddball, output was Number.
};
+enum class BigIntOperationHint : uint8_t {
+ kBigInt,
+};
+
size_t hash_value(NumberOperationHint);
+size_t hash_value(BigIntOperationHint);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
-
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BigIntOperationHint);
V8_EXPORT_PRIVATE NumberOperationHint NumberOperationHintOf(const Operator* op)
V8_WARN_UNUSED_RESULT;
@@ -634,6 +639,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* NumberSilenceNaN();
+ const Operator* BigIntAdd();
+ const Operator* BigIntNegate();
+
const Operator* SpeculativeSafeIntegerAdd(NumberOperationHint hint);
const Operator* SpeculativeSafeIntegerSubtract(NumberOperationHint hint);
@@ -653,6 +661,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* SpeculativeNumberLessThanOrEqual(NumberOperationHint hint);
const Operator* SpeculativeNumberEqual(NumberOperationHint hint);
+ const Operator* SpeculativeBigIntAdd(BigIntOperationHint hint);
+ const Operator* SpeculativeBigIntNegate(BigIntOperationHint hint);
+ const Operator* BigIntAsUintN(int bits);
+
const Operator* ReferenceEqual();
const Operator* SameValue();
const Operator* SameValueNumbersOnly();
@@ -666,9 +678,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
const Operator* StringCharCodeAt();
- const Operator* StringCodePointAt(UnicodeEncoding encoding);
+ const Operator* StringCodePointAt();
const Operator* StringFromSingleCharCode();
- const Operator* StringFromSingleCodePoint(UnicodeEncoding encoding);
+ const Operator* StringFromSingleCodePoint();
+ const Operator* StringFromCodePointAt();
const Operator* StringIndexOf();
const Operator* StringLength();
const Operator* StringToLowerCaseIntl();
@@ -686,6 +699,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* PlainPrimitiveToWord32();
const Operator* PlainPrimitiveToFloat64();
+ const Operator* ChangeCompressedSignedToInt32();
const Operator* ChangeTaggedSignedToInt32();
const Operator* ChangeTaggedSignedToInt64();
const Operator* ChangeTaggedToInt32();
@@ -695,6 +709,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ChangeTaggedToTaggedSigned();
const Operator* ChangeCompressedToTaggedSigned();
const Operator* ChangeTaggedToCompressedSigned();
+ const Operator* ChangeInt31ToCompressedSigned();
const Operator* ChangeInt31ToTaggedSigned();
const Operator* ChangeInt32ToTagged();
const Operator* ChangeInt64ToTagged();
@@ -704,6 +719,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ChangeFloat64ToTaggedPointer();
const Operator* ChangeTaggedToBit();
const Operator* ChangeBitToTagged();
+ const Operator* TruncateBigIntToUint64();
+ const Operator* ChangeUint64ToBigInt();
const Operator* TruncateTaggedToWord32();
const Operator* TruncateTaggedToFloat64();
const Operator* TruncateTaggedToBit();
@@ -740,6 +757,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckedInt32Mod();
const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
const Operator* CheckedInt32Sub();
+ const Operator* CheckedInt32ToCompressedSigned(
+ const VectorSlotPair& feedback);
const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* CheckedInt64ToInt32(const VectorSlotPair& feedback);
const Operator* CheckedInt64ToTaggedSigned(const VectorSlotPair& feedback);
@@ -752,6 +771,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const VectorSlotPair& feedback);
const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback);
const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckBigInt(const VectorSlotPair& feedback);
const Operator* CheckedCompressedToTaggedPointer(
const VectorSlotPair& feedback);
const Operator* CheckedCompressedToTaggedSigned(
@@ -874,6 +894,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// Abort (for terminating execution on internal error).
const Operator* RuntimeAbort(AbortReason reason);
+  // Abort if the value input does not inhabit the given type.
+ const Operator* AssertType(Type type);
+
const Operator* DateNow();
private:
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index c00613c232..2bb5a0a4b5 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -329,9 +329,7 @@ void StateValuesAccess::iterator::Pop() {
current_depth_--;
}
-
-bool StateValuesAccess::iterator::done() { return current_depth_ < 0; }
-
+bool StateValuesAccess::iterator::done() const { return current_depth_ < 0; }
void StateValuesAccess::iterator::Advance() {
Top()->Advance();
@@ -392,14 +390,12 @@ MachineType StateValuesAccess::iterator::type() {
}
}
-
-bool StateValuesAccess::iterator::operator!=(iterator& other) {
+bool StateValuesAccess::iterator::operator!=(iterator const& other) {
// We only allow comparison with end().
CHECK(other.done());
return !done();
}
-
StateValuesAccess::iterator& StateValuesAccess::iterator::operator++() {
Advance();
return *this;
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index 00ec3bb351..0ff5d218f1 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -92,7 +92,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
class V8_EXPORT_PRIVATE iterator {
public:
// Bare minimum of operators needed for range iteration.
- bool operator!=(iterator& other);
+ bool operator!=(iterator const& other);
iterator& operator++();
TypedNode operator*();
@@ -104,7 +104,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
Node* node();
MachineType type();
- bool done();
+ bool done() const;
void Advance();
void EnsureValid();
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 13d8199745..b71bcd7e66 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -6,6 +6,7 @@
#include "src/compiler/store-store-elimination.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
@@ -129,7 +130,8 @@ namespace {
class RedundantStoreFinder final {
public:
- RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone);
+ RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone);
void Find();
@@ -157,6 +159,7 @@ class RedundantStoreFinder final {
ZoneSet<Node*>& to_remove() { return to_remove_; }
JSGraph* const jsgraph_;
+ TickCounter* const tick_counter_;
Zone* const temp_zone_;
ZoneStack<Node*> revisit_;
@@ -199,6 +202,7 @@ void RedundantStoreFinder::Find() {
Visit(jsgraph()->graph()->end());
while (!revisit_.empty()) {
+ tick_counter_->DoTick();
Node* next = revisit_.top();
revisit_.pop();
DCHECK_LT(next->id(), in_revisit_.size());
@@ -230,9 +234,10 @@ bool RedundantStoreFinder::HasBeenVisited(Node* node) {
return !unobservable_for_id(node->id()).IsUnvisited();
}
-void StoreStoreElimination::Run(JSGraph* js_graph, Zone* temp_zone) {
+void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone) {
// Find superfluous nodes
- RedundantStoreFinder finder(js_graph, temp_zone);
+ RedundantStoreFinder finder(js_graph, tick_counter, temp_zone);
finder.Find();
// Remove superfluous nodes
@@ -336,8 +341,11 @@ bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
}
// Initialize unobservable_ with js_graph->graph->NodeCount() empty sets.
-RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone)
+RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph,
+ TickCounter* tick_counter,
+ Zone* temp_zone)
: jsgraph_(js_graph),
+ tick_counter_(tick_counter),
temp_zone_(temp_zone),
revisit_(temp_zone),
in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h
index cda7591fcc..646640a310 100644
--- a/deps/v8/src/compiler/store-store-elimination.h
+++ b/deps/v8/src/compiler/store-store-elimination.h
@@ -11,11 +11,15 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
class StoreStoreElimination final {
public:
- static void Run(JSGraph* js_graph, Zone* temp_zone);
+ static void Run(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 4cf2c38bdb..5dbbad3dcd 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "src/base/flags.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
@@ -33,13 +34,15 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph)
+Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph,
+ TickCounter* tick_counter)
: flags_(flags),
graph_(graph),
decorator_(nullptr),
cache_(TypeCache::Get()),
broker_(broker),
- operation_typer_(broker, zone()) {
+ operation_typer_(broker, zone()),
+ tick_counter_(tick_counter) {
singleton_false_ = operation_typer_.singleton_false();
singleton_true_ = operation_typer_.singleton_true();
@@ -47,7 +50,6 @@ Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph)
graph_->AddDecorator(decorator_);
}
-
Typer::~Typer() {
graph_->RemoveDecorator(decorator_);
}
@@ -91,14 +93,18 @@ class Typer::Visitor : public Reducer {
case IrOpcode::k##x: \
return UpdateType(node, TypeBinaryOp(node, x));
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) \
case IrOpcode::k##x: \
return UpdateType(node, TypeUnaryOp(node, x));
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
@@ -157,14 +163,18 @@ class Typer::Visitor : public Reducer {
case IrOpcode::k##x: \
return TypeBinaryOp(node, x);
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) \
case IrOpcode::k##x: \
return TypeUnaryOp(node, x);
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
@@ -276,14 +286,18 @@ class Typer::Visitor : public Reducer {
return t->operation_typer_.Name(type); \
}
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
#define DECLARE_METHOD(Name) \
static Type Name(Type lhs, Type rhs, Typer* t) { \
return t->operation_typer_.Name(lhs, rhs); \
}
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
static Type ObjectIsArrayBufferView(Type, Typer*);
@@ -410,7 +424,7 @@ void Typer::Run(const NodeVector& roots,
induction_vars->ChangeToInductionVariablePhis();
}
Visitor visitor(this, induction_vars);
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter_);
graph_reducer.AddReducer(&visitor);
for (Node* const root : roots) graph_reducer.ReduceNode(root);
graph_reducer.ReduceGraph();
@@ -798,6 +812,8 @@ Type Typer::Visitor::TypeHeapConstant(Node* node) {
return TypeConstant(HeapConstantOf(node->op()));
}
+Type Typer::Visitor::TypeCompressedHeapConstant(Node* node) { UNREACHABLE(); }
+
Type Typer::Visitor::TypeExternalConstant(Node* node) {
return Type::ExternalPointer();
}
@@ -2060,6 +2076,10 @@ Type Typer::Visitor::TypeStringFromSingleCodePoint(Node* node) {
return TypeUnaryOp(node, StringFromSingleCodePointTyper);
}
+Type Typer::Visitor::TypeStringFromCodePointAt(Node* node) {
+ return Type::String();
+}
+
Type Typer::Visitor::TypeStringIndexOf(Node* node) {
return Type::Range(-1.0, String::kMaxLength, zone());
}
@@ -2336,6 +2356,8 @@ Type Typer::Visitor::TypeFindOrderedHashMapEntryForInt32Key(Node* node) {
Type Typer::Visitor::TypeRuntimeAbort(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeAssertType(Node* node) { UNREACHABLE(); }
+
// Heap constants.
Type Typer::Visitor::TypeConstant(Handle<Object> value) {
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index fa87d81f1e..305470d724 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -11,6 +11,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -25,7 +28,8 @@ class V8_EXPORT_PRIVATE Typer {
};
using Flags = base::Flags<Flag>;
- Typer(JSHeapBroker* broker, Flags flags, Graph* graph);
+ Typer(JSHeapBroker* broker, Flags flags, Graph* graph,
+ TickCounter* tick_counter);
~Typer();
void Run();
@@ -49,6 +53,7 @@ class V8_EXPORT_PRIVATE Typer {
TypeCache const* cache_;
JSHeapBroker* broker_;
OperationTyper operation_typer_;
+ TickCounter* const tick_counter_;
Type singleton_false_;
Type singleton_true_;
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index edf07a4ffd..d4267a75fe 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -6,9 +6,10 @@
#include "src/compiler/types.h"
-#include "src/utils/ostreams.h"
#include "src/handles/handles-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -202,7 +203,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
return kOtherObject;
case JS_ARRAY_TYPE:
return kArray;
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
#ifdef V8_INTL_SUPPORT
@@ -312,8 +313,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case SCRIPT_TYPE:
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
- case MODULE_TYPE:
- case MODULE_INFO_ENTRY_TYPE:
+ case SOURCE_TEXT_MODULE_TYPE:
+ case SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE:
+ case SYNTHETIC_MODULE_TYPE:
case CELL_TYPE:
case PREPARSE_DATA_TYPE:
case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
@@ -349,6 +351,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case ENUM_CACHE_TYPE:
case SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE:
case WASM_CAPI_FUNCTION_DATA_TYPE:
+ case WASM_INDIRECT_FUNCTION_TABLE_TYPE:
case WASM_DEBUG_INFO_TYPE:
case WASM_EXCEPTION_TAG_TYPE:
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
@@ -363,6 +366,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
case FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE:
+#define MAKE_TORQUE_CLASS_TYPE(V) case V:
+ TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
+#undef MAKE_TORQUE_CLASS_TYPE
UNREACHABLE();
}
UNREACHABLE();
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 21aaab5036..0dc1aa77b0 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -7,7 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/common/globals.h"
-#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/heap-refs.h"
#include "src/handles/handles.h"
#include "src/numbers/conversions.h"
#include "src/objects/objects.h"
@@ -220,6 +220,7 @@ namespace compiler {
INTERNAL_BITSET_TYPE_LIST(V) \
PROPER_BITSET_TYPE_LIST(V)
+class JSHeapBroker;
class HeapConstantType;
class OtherNumberConstantType;
class TupleType;
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 3f1b2e9f13..d3d4d54ea2 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -431,6 +431,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kHeapConstant:
+ case IrOpcode::kCompressedHeapConstant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
// Type is anything.
@@ -933,7 +934,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kComment:
- case IrOpcode::kDebugAbort:
+ case IrOpcode::kAbortCSAAssert:
case IrOpcode::kDebugBreak:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
@@ -975,6 +976,25 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kSpeculativeBigIntAdd:
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kSpeculativeBigIntNegate:
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kBigIntAsUintN:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kBigIntAdd:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckValueInputIs(node, 1, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kBigIntNegate:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract:
case IrOpcode::kNumberMultiply:
@@ -1156,6 +1176,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::String());
break;
+ case IrOpcode::kStringFromCodePointAt:
+ // (String, Unsigned32) -> String
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kStringIndexOf:
// (String, String, SignedSmall) -> SignedSmall
CheckValueInputIs(node, 0, Type::String());
@@ -1306,6 +1332,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckNotTyped(node);
break;
+ case IrOpcode::kChangeCompressedSignedToInt32:
case IrOpcode::kChangeTaggedSignedToInt32: {
// Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1360,6 +1387,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kChangeInt31ToCompressedSigned:
case IrOpcode::kChangeInt31ToTaggedSigned: {
// Signed31 /\ UntaggedInt32 -> Signed31 /\ Tagged
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1429,6 +1457,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kTruncateBigIntToUint64:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kChangeUint64ToBigInt:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
case IrOpcode::kTruncateTaggedToBit:
case IrOpcode::kTruncateTaggedPointerToBit:
break;
@@ -1498,6 +1534,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kCheckedUint32Div:
case IrOpcode::kCheckedUint32Mod:
case IrOpcode::kCheckedInt32Mul:
+ case IrOpcode::kCheckedInt32ToCompressedSigned:
case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedInt64ToInt32:
case IrOpcode::kCheckedInt64ToTaggedSigned:
@@ -1520,6 +1557,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kCheckedTaggedToCompressedSigned:
case IrOpcode::kCheckedTaggedToCompressedPointer:
case IrOpcode::kCheckedTruncateTaggedToWord32:
+ case IrOpcode::kAssertType:
break;
case IrOpcode::kCheckFloat64Hole:
@@ -1619,6 +1657,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CHECK_EQ(0, value_count);
CheckTypeIs(node, Type::Number());
break;
+ case IrOpcode::kCheckBigInt:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::BigInt());
+ break;
// Machine operators
// -----------------------
@@ -1755,6 +1797,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kBitcastInt64ToFloat64:
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kBitcastWordToTaggedSigned:
case IrOpcode::kChangeInt32ToInt64:
@@ -1800,6 +1843,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
+ case IrOpcode::kMemoryBarrier:
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord32AtomicExchange:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 3396214e58..2da7177ece 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -14,6 +14,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/assembler.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/compiler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
@@ -276,8 +277,9 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
}
Node* WasmGraphBuilder::RefNull() {
- return LOAD_INSTANCE_FIELD(NullValue,
- MachineType::TypeCompressedTaggedPointer());
+ Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+ return LOAD_TAGGED_POINTER(
+ isolate_root, IsolateData::root_slot_offset(RootIndex::kNullValue));
}
Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
@@ -2195,8 +2197,8 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
graph()->NewNode(m->I32x4ExtractLane(3), value));
break;
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
++index;
break;
@@ -2334,8 +2336,8 @@ Node** WasmGraphBuilder::GetExceptionValues(
BuildDecodeException32BitValue(values_array, &index));
break;
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
++index;
break;
@@ -2853,25 +2855,69 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index,
Node** args, Node*** rets,
wasm::WasmCodePosition position) {
- if (table_index == 0) {
- return BuildIndirectCall(sig_index, args, rets, position, kCallContinues);
- }
return BuildIndirectCall(table_index, sig_index, args, rets, position,
kCallContinues);
}
-Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
+void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index,
+ Node** ift_size,
+ Node** ift_sig_ids,
+ Node** ift_targets,
+ Node** ift_instances) {
+ if (table_index == 0) {
+ *ift_size =
+ LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32());
+ *ift_sig_ids = LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds,
+ MachineType::Pointer());
+ *ift_targets = LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets,
+ MachineType::Pointer());
+ *ift_instances = LOAD_INSTANCE_FIELD(
+ IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer());
+ return;
+ }
+
+ Node* ift_tables = LOAD_INSTANCE_FIELD(
+ IndirectFunctionTables, MachineType::TypeCompressedTaggedPointer());
+ Node* ift_table = LOAD_FIXED_ARRAY_SLOT_ANY(ift_tables, table_index);
+
+ *ift_size = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset),
+ MachineType::Int32());
+
+ *ift_sig_ids = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSigIdsOffset),
+ MachineType::Pointer());
+
+ *ift_targets = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kTargetsOffset),
+ MachineType::Pointer());
+
+ *ift_instances = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset),
+ MachineType::TypeCompressedTaggedPointer());
+}
+
+Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
+ uint32_t sig_index, Node** args,
Node*** rets,
wasm::WasmCodePosition position,
IsReturnCall continuation) {
DCHECK_NOT_NULL(args[0]);
DCHECK_NOT_NULL(env_);
- // Assume only one table for now.
- wasm::FunctionSig* sig = env_->module->signatures[sig_index];
+ // First we have to load the table.
+ Node* ift_size;
+ Node* ift_sig_ids;
+ Node* ift_targets;
+ Node* ift_instances;
+ LoadIndirectFunctionTable(table_index, &ift_size, &ift_sig_ids, &ift_targets,
+ &ift_instances);
- Node* ift_size =
- LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32());
+ wasm::FunctionSig* sig = env_->module->signatures[sig_index];
MachineOperatorBuilder* machine = mcgraph()->machine();
Node* key = args[0];
@@ -2894,9 +2940,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
}
// Load signature from the table and check.
- Node* ift_sig_ids =
- LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, MachineType::Pointer());
-
int32_t expected_sig_id = env_->module->signature_ids[sig_index];
Node* int32_scaled_key = Uint32ToUintptr(
graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2)));
@@ -2909,11 +2952,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
- Node* ift_targets =
- LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, MachineType::Pointer());
- Node* ift_instances = LOAD_INSTANCE_FIELD(
- IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer());
-
Node* tagged_scaled_key;
if (kTaggedSize == kInt32Size) {
tagged_scaled_key = int32_scaled_key;
@@ -2955,48 +2993,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
}
}
-Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
- uint32_t sig_index, Node** args,
- Node*** rets,
- wasm::WasmCodePosition position,
- IsReturnCall continuation) {
- DCHECK_NOT_NULL(args[0]);
- Node* entry_index = args[0];
- DCHECK_NOT_NULL(env_);
- BoundsCheckTable(table_index, entry_index, position, wasm::kTrapFuncInvalid,
- nullptr);
-
- DCHECK(Smi::IsValid(table_index));
- DCHECK(Smi::IsValid(sig_index));
- Node* runtime_args[]{
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
- BuildChangeUint31ToSmi(entry_index),
- graph()->NewNode(mcgraph()->common()->NumberConstant(sig_index))};
-
- Node* target_instance = BuildCallToRuntime(
- Runtime::kWasmIndirectCallCheckSignatureAndGetTargetInstance,
- runtime_args, arraysize(runtime_args));
-
- // We reuse the runtime_args array here, even though we only need the first
- // two arguments.
- Node* call_target = BuildCallToRuntime(
- Runtime::kWasmIndirectCallGetTargetAddress, runtime_args, 2);
-
- wasm::FunctionSig* sig = env_->module->signatures[sig_index];
- args[0] = call_target;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
-
- switch (continuation) {
- case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, target_instance,
- use_retpoline);
- case kReturnCall:
- return BuildWasmReturnCall(sig, args, position, target_instance,
- use_retpoline);
- }
-}
-
Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
@@ -3019,9 +3015,6 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args,
Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
uint32_t sig_index, Node** args,
wasm::WasmCodePosition position) {
- if (table_index == 0) {
- return BuildIndirectCall(sig_index, args, nullptr, position, kReturnCall);
- }
return BuildIndirectCall(table_index, sig_index, args, nullptr, position,
kReturnCall);
}
@@ -3324,13 +3317,6 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
return result;
}
-Node* WasmGraphBuilder::BuildLoadBuiltinFromInstance(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
- return LOAD_TAGGED_POINTER(isolate_root,
- IsolateData::builtin_slot_offset(builtin_index));
-}
-
// Only call this function for code which is not reused across instantiations,
// as we do not patch the embedded js_context.
Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(
@@ -3492,7 +3478,7 @@ void WasmGraphBuilder::GetTableBaseAndOffset(uint32_t table_index,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
}
-Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
+Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position) {
if (env_->module->tables[table_index].type == wasm::kWasmAnyRef) {
Node* base = nullptr;
@@ -3501,7 +3487,7 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
return LOAD_RAW_NODE_OFFSET(base, offset,
MachineType::TypeCompressedTagged());
}
- // We access anyfunc tables through runtime calls.
+ // We access funcref tables through runtime calls.
WasmTableGetDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), // zone
@@ -3521,7 +3507,7 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
Effect(), Control())));
}
-Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val,
+Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
wasm::WasmCodePosition position) {
if (env_->module->tables[table_index].type == wasm::kWasmAnyRef) {
Node* base = nullptr;
@@ -3530,7 +3516,7 @@ Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val,
return STORE_RAW_NODE_OFFSET(
base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier);
} else {
- // We access anyfunc tables through runtime calls.
+ // We access funcref tables through runtime calls.
WasmTableSetDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), // zone
@@ -4000,6 +3986,30 @@ Node* WasmGraphBuilder::S128Zero() {
Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
+ case wasm::kExprF64x2Splat:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Splat(), inputs[0]);
+ case wasm::kExprF64x2Abs:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]);
+ case wasm::kExprF64x2Neg:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Neg(), inputs[0]);
+ case wasm::kExprF64x2Eq:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Eq(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Ne:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Ne(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Lt:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Le:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Gt:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[1],
+ inputs[0]);
+ case wasm::kExprF64x2Ge:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[1],
+ inputs[0]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
@@ -4054,6 +4064,49 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF32x4Ge:
return graph()->NewNode(mcgraph()->machine()->F32x4Le(), inputs[1],
inputs[0]);
+ case wasm::kExprI64x2Splat:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
+ case wasm::kExprI64x2Neg:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]);
+ case wasm::kExprI64x2Add:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Add(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Sub:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Sub(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Mul:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Mul(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Eq:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Ne:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Ne(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2LtS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2LeS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2GtS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2GeS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2LtU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2LeU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2GtU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2GeU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4Splat:
return graph()->NewNode(mcgraph()->machine()->I32x4Splat(), inputs[0]);
case wasm::kExprI32x4SConvertF32x4:
@@ -4305,6 +4358,10 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128Select:
return graph()->NewNode(mcgraph()->machine()->S128Select(), inputs[2],
inputs[0], inputs[1]);
+ case wasm::kExprS1x2AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->S1x2AnyTrue(), inputs[0]);
+ case wasm::kExprS1x2AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->S1x2AllTrue(), inputs[0]);
case wasm::kExprS1x4AnyTrue:
return graph()->NewNode(mcgraph()->machine()->S1x4AnyTrue(), inputs[0]);
case wasm::kExprS1x4AllTrue:
@@ -4326,12 +4383,24 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
+ case wasm::kExprF64x2ExtractLane:
+ return graph()->NewNode(mcgraph()->machine()->F64x2ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprF64x2ReplaceLane:
+ return graph()->NewNode(mcgraph()->machine()->F64x2ReplaceLane(lane),
+ inputs[0], inputs[1]);
case wasm::kExprF32x4ExtractLane:
return graph()->NewNode(mcgraph()->machine()->F32x4ExtractLane(lane),
inputs[0]);
case wasm::kExprF32x4ReplaceLane:
return graph()->NewNode(mcgraph()->machine()->F32x4ReplaceLane(lane),
inputs[0], inputs[1]);
+ case wasm::kExprI64x2ExtractLane:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprI64x2ReplaceLane:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ReplaceLane(lane),
+ inputs[0], inputs[1]);
case wasm::kExprI32x4ExtractLane:
return graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
inputs[0]);
@@ -4359,6 +4428,14 @@ Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
+ case wasm::kExprI64x2Shl:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Shl(shift), inputs[0]);
+ case wasm::kExprI64x2ShrS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ShrS(shift),
+ inputs[0]);
+ case wasm::kExprI64x2ShrU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(shift),
+ inputs[0]);
case wasm::kExprI32x4Shl:
return graph()->NewNode(mcgraph()->machine()->I32x4Shl(shift), inputs[0]);
case wasm::kExprI32x4ShrS:
@@ -4612,6 +4689,11 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
return SetEffect(node);
}
+Node* WasmGraphBuilder::AtomicFence() {
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->MemBarrier(),
+ Effect(), Control()));
+}
+
#undef ATOMIC_BINOP_LIST
#undef ATOMIC_CMP_EXCHG_LIST
#undef ATOMIC_LOAD_LIST
@@ -4636,8 +4718,19 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position);
- Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
auto m = mcgraph()->machine();
+ auto common = mcgraph()->common();
+ Node* size_null_check =
+ graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0));
+ Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ size_null_check, Control());
+
+ Node* size_null_etrue = Effect();
+ Node* size_null_if_false =
+ graph()->NewNode(common->IfFalse(), size_null_branch);
+ SetControl(size_null_if_false);
+
+ Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
Node* seg_index = Uint32Constant(data_segment_index);
Node* src_fail;
@@ -4679,9 +4772,16 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, src, size);
- return TrapIfTrue(wasm::kTrapMemOutOfBounds,
- graph()->NewNode(m->Word32Or(), dst_fail, src_fail),
- position);
+ TrapIfTrue(wasm::kTrapMemOutOfBounds,
+ graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position);
+ Node* size_null_if_true =
+ graph()->NewNode(common->IfTrue(), size_null_branch);
+
+ Node* merge = SetControl(
+ graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
+ SetEffect(
+ graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
+ return merge;
}
Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
@@ -4699,16 +4799,19 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position) {
auto m = mcgraph()->machine();
- // The data must be copied backward if the regions overlap and src < dst. The
- // regions overlap if {src + size > dst && dst + size > src}. Since we already
- // test that {src < dst}, we know that {dst + size > src}, so this simplifies
- // to just {src + size > dst}. That sum can overflow, but if we subtract
- // {size} from both sides of the inequality we get the equivalent test
- // {size > dst - src}.
- Node* copy_backward = graph()->NewNode(
- m->Word32And(), graph()->NewNode(m->Uint32LessThan(), src, dst),
- graph()->NewNode(m->Uint32LessThan(),
- graph()->NewNode(m->Int32Sub(), dst, src), size));
+ auto common = mcgraph()->common();
+ // If size == 0, then memory.copy is a no-op.
+ Node* size_null_check =
+ graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0));
+ Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ size_null_check, Control());
+
+ Node* size_null_etrue = Effect();
+ Node* size_null_if_false =
+ graph()->NewNode(common->IfFalse(), size_null_branch);
+ SetControl(size_null_if_false);
+ // The data must be copied backward if src < dst.
+ Node* copy_backward = graph()->NewNode(m->Uint32LessThan(), src, dst);
Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
@@ -4728,13 +4831,32 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, src, size);
- return TrapIfTrue(wasm::kTrapMemOutOfBounds,
- graph()->NewNode(m->Word32Or(), dst_fail, src_fail),
- position);
+ TrapIfTrue(wasm::kTrapMemOutOfBounds,
+ graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position);
+ Node* size_null_if_true =
+ graph()->NewNode(common->IfTrue(), size_null_branch);
+
+ Node* merge = SetControl(
+ graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
+ SetEffect(
+ graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
+ return merge;
}
Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
wasm::WasmCodePosition position) {
+ auto machine = mcgraph()->machine();
+ auto common = mcgraph()->common();
+ // If size == 0, then memory.fill is a no-op.
+ Node* size_null_check = graph()->NewNode(machine->Word32Equal(), size,
+ mcgraph()->Int32Constant(0));
+ Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ size_null_check, Control());
+
+ Node* size_null_etrue = Effect();
+ Node* size_null_if_false =
+ graph()->NewNode(common->IfFalse(), size_null_branch);
+ SetControl(size_null_if_false);
Node* fail = BoundsCheckMemRange(&dst, &size, position);
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
ExternalReference::wasm_memory_fill()));
@@ -4742,7 +4864,15 @@ Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, value, size);
- return TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position);
+ TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position);
+ Node* size_null_if_true =
+ graph()->NewNode(common->IfTrue(), size_null_branch);
+
+ Node* merge = SetControl(
+ graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
+ SetEffect(
+ graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
+ return merge;
}
Node* WasmGraphBuilder::CheckElemSegmentIsPassiveAndNotDropped(
@@ -4789,13 +4919,13 @@ Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
mcgraph()->Int32Constant(1), Effect(), Control()));
}
-Node* WasmGraphBuilder::TableCopy(uint32_t table_src_index,
- uint32_t table_dst_index, Node* dst,
+Node* WasmGraphBuilder::TableCopy(uint32_t table_dst_index,
+ uint32_t table_src_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
Node* args[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)),
graph()->NewNode(mcgraph()->common()->NumberConstant(table_dst_index)),
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)),
BuildConvertUint32ToSmiWithSaturation(dst, FLAG_wasm_max_table_size),
BuildConvertUint32ToSmiWithSaturation(src, FLAG_wasm_max_table_size),
BuildConvertUint32ToSmiWithSaturation(size, FLAG_wasm_max_table_size)};
@@ -4878,28 +5008,6 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() {
}
namespace {
-bool must_record_function_compilation(Isolate* isolate) {
- return isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling();
-}
-
-PRINTF_FORMAT(4, 5)
-void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
- Isolate* isolate, Handle<Code> code,
- const char* format, ...) {
- DCHECK(must_record_function_compilation(isolate));
-
- ScopedVector<char> buffer(128);
- va_list arguments;
- va_start(arguments, format);
- int len = VSNPrintF(buffer, format, arguments);
- CHECK_LT(0, len);
- va_end(arguments);
- Handle<String> name_str =
- isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
- PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *name_str));
-}
-
class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
WasmWrapperGraphBuilder(Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* sig,
@@ -4914,12 +5022,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control) {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
- Node* target = (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
- ? mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmAllocateHeapNumber,
- RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(
- BUILTIN_CODE(isolate_, AllocateHeapNumber));
+ Node* target =
+ (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+ ? mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmAllocateHeapNumber,
+ RelocInfo::WASM_STUB_CALL)
+ : BuildLoadBuiltinFromInstance(Builtins::kAllocateHeapNumber);
if (!allocate_heap_number_operator_.is_set()) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), AllocateHeapNumberDescriptor(), 0,
@@ -4956,6 +5064,34 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return mcgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
+ Node* BuildLoadUndefinedValueFromInstance() {
+ if (undefined_value_node_ == nullptr) {
+ Node* isolate_root = graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Pointer()),
+ instance_node_.get(),
+ mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(IsolateRoot)),
+ graph()->start(), graph()->start());
+ undefined_value_node_ = InsertDecompressionIfNeeded(
+ MachineType::TypeCompressedTaggedPointer(),
+ graph()->NewNode(
+ mcgraph()->machine()->Load(
+ MachineType::TypeCompressedTaggedPointer()),
+ isolate_root,
+ mcgraph()->Int32Constant(
+ IsolateData::root_slot_offset(RootIndex::kUndefinedValue)),
+ isolate_root, graph()->start()));
+ }
+ return undefined_value_node_.get();
+ }
+
+ Node* BuildLoadBuiltinFromInstance(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ Node* isolate_root =
+ LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+ return LOAD_TAGGED_POINTER(isolate_root,
+ IsolateData::builtin_slot_offset(builtin_index));
+ }
+
Node* BuildChangeInt32ToTagged(Node* value) {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
@@ -5096,7 +5232,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
(stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmToNumber, RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, ToNumber));
+ : BuildLoadBuiltinFromInstance(Builtins::kToNumber);
Node* result = SetEffect(
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), stub_code,
@@ -5126,8 +5262,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetControl(is_heap_object.if_true);
Node* orig_effect = Effect();
- Node* undefined_node = LOAD_INSTANCE_FIELD(
- UndefinedValue, MachineType::TypeCompressedTaggedPointer());
+ Node* undefined_node = BuildLoadUndefinedValueFromInstance();
Node* check_undefined =
graph()->NewNode(machine->WordEqual(), value, undefined_node);
Node* effect_tagged = Effect();
@@ -5173,8 +5308,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kWasmF64:
return BuildChangeFloat64ToTagged(node);
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
return node;
default:
UNREACHABLE();
@@ -5196,7 +5331,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
(stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmI64ToBigInt, RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, I64ToBigInt));
+ : BuildLoadBuiltinFromInstance(Builtins::kI64ToBigInt);
return SetEffect(
SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
@@ -5218,7 +5353,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
(stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmBigIntToI64, RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, BigIntToI64));
+ : BuildLoadBuiltinFromInstance(Builtins::kBigIntToI64);
return SetEffect(SetControl(
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), target,
@@ -5228,15 +5363,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* FromJS(Node* node, Node* js_context, wasm::ValueType type) {
DCHECK_NE(wasm::kWasmStmt, type);
- // The parameter is of type anyref or except_ref, we take it as is.
- if (type == wasm::kWasmAnyRef || type == wasm::kWasmExceptRef) {
+ // The parameter is of type anyref or exnref, we take it as is.
+ if (type == wasm::kWasmAnyRef || type == wasm::kWasmExnRef) {
return node;
}
- if (type == wasm::kWasmAnyFunc) {
+ if (type == wasm::kWasmFuncRef) {
Node* check =
BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
- Runtime::kWasmIsValidAnyFuncValue, js_context, &node, 1, effect_,
+ Runtime::kWasmIsValidFuncRefValue, js_context, &node, 1, effect_,
Control())));
Diamond type_check(graph(), mcgraph()->common(), check,
@@ -5471,8 +5606,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// The callable is passed as the last parameter, after WASM arguments.
Node* callable_node = Param(wasm_count + 1);
- Node* undefined_node = LOAD_INSTANCE_FIELD(
- UndefinedValue, MachineType::TypeCompressedTaggedPointer());
+ Node* undefined_node = BuildLoadUndefinedValueFromInstance();
Node* call = nullptr;
bool sloppy_receiver = true;
@@ -5811,22 +5945,26 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
void BuildCWasmEntry() {
- // Build the start and the JS parameter nodes.
- SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 5)));
+ // +1 offset for first parameter index being -1.
+ SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 1)));
- // Create parameter nodes (offset by 1 for the receiver parameter).
- Node* code_entry = Param(CWasmEntryParameters::kCodeEntry + 1);
- Node* object_ref_node = Param(CWasmEntryParameters::kObjectRef + 1);
- Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
+ Node* code_entry = Param(CWasmEntryParameters::kCodeEntry);
+ Node* object_ref = Param(CWasmEntryParameters::kObjectRef);
+ Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer);
+ Node* c_entry_fp = Param(CWasmEntryParameters::kCEntryFp);
+
+ Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer());
+ STORE_RAW(fp_value, TypedFrameConstants::kFirstPushedFrameValueOffset,
+ c_entry_fp, MachineType::PointerRepresentation(),
+ kNoWriteBarrier);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
- int arg_count =
- wasm_arg_count + 4; // code, object_ref_node, control, effect
+ int arg_count = wasm_arg_count + 4; // code, object_ref, control, effect
Node** args = Buffer(arg_count);
int pos = 0;
args[pos++] = code_entry;
- args[pos++] = object_ref_node;
+ args[pos++] = object_ref;
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -5847,26 +5985,43 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* call = SetEffect(graph()->NewNode(
mcgraph()->common()->Call(call_descriptor), arg_count, args));
- // Store the return value.
- DCHECK_GE(1, sig_->return_count());
- if (sig_->return_count() == 1) {
+ Node* if_success = graph()->NewNode(mcgraph()->common()->IfSuccess(), call);
+ Node* if_exception =
+ graph()->NewNode(mcgraph()->common()->IfException(), call, call);
+
+ // Handle exception: return it.
+ SetControl(if_exception);
+ Return(if_exception);
+
+ // Handle success: store the return value(s).
+ SetControl(if_success);
+ pos = 0;
+ offset = 0;
+ for (wasm::ValueType type : sig_->returns()) {
StoreRepresentation store_rep(
- wasm::ValueTypes::MachineRepresentationFor(sig_->GetReturn()),
- kNoWriteBarrier);
+ wasm::ValueTypes::MachineRepresentationFor(type), kNoWriteBarrier);
+ Node* value = sig_->return_count() == 1
+ ? call
+ : graph()->NewNode(mcgraph()->common()->Projection(pos),
+ call, Control());
SetEffect(graph()->NewNode(mcgraph()->machine()->Store(store_rep),
- arg_buffer, Int32Constant(0), call, Effect(),
- Control()));
+ arg_buffer, Int32Constant(offset), value,
+ Effect(), Control()));
+ offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ pos++;
}
+
Return(jsgraph()->SmiConstant(0));
if (mcgraph()->machine()->Is32() && ContainsInt64(sig_)) {
MachineRepresentation sig_reps[] = {
- MachineRepresentation::kWord32, // return value
- MachineRepresentation::kTagged, // receiver
- MachineRepresentation::kTagged, // arg0 (code)
- MachineRepresentation::kTagged // arg1 (buffer)
+ MachineType::PointerRepresentation(), // return value
+ MachineType::PointerRepresentation(), // target
+ MachineRepresentation::kTagged, // object_ref
+ MachineType::PointerRepresentation(), // argv
+ MachineType::PointerRepresentation() // c_entry_fp
};
- Signature<MachineRepresentation> c_entry_sig(1, 2, sig_reps);
+ Signature<MachineRepresentation> c_entry_sig(1, 4, sig_reps);
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(),
mcgraph()->common(), mcgraph()->zone(), &c_entry_sig);
r.LowerGraph();
@@ -5879,6 +6034,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Isolate* const isolate_;
JSGraph* jsgraph_;
StubCallMode stub_mode_;
+ SetOncePointer<Node> undefined_value_node_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
wasm::WasmFeatures enabled_features_;
};
@@ -5901,27 +6057,25 @@ void AppendSignature(char* buffer, size_t max_name_len,
} // namespace
-MaybeHandle<Code> CompileJSToWasmWrapper(Isolate* isolate,
- wasm::FunctionSig* sig,
- bool is_import) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "CompileJSToWasmWrapper");
+std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
+ Isolate* isolate, wasm::FunctionSig* sig, bool is_import) {
//----------------------------------------------------------------------------
// Create the Graph.
//----------------------------------------------------------------------------
- Zone zone(isolate->allocator(), ZONE_NAME);
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
+ std::unique_ptr<Zone> zone =
+ base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+ Graph* graph = new (zone.get()) Graph(zone.get());
+ CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
- &zone, MachineType::PointerRepresentation(),
+ zone.get(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
- WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
StubCallMode::kCallCodeObject,
wasm::WasmFeaturesFromIsolate(isolate));
builder.set_control_ptr(&control);
@@ -5929,73 +6083,66 @@ MaybeHandle<Code> CompileJSToWasmWrapper(Isolate* isolate,
builder.BuildJSToWasmWrapper(is_import);
//----------------------------------------------------------------------------
- // Run the compilation pipeline.
+ // Create the compilation job.
//----------------------------------------------------------------------------
static constexpr size_t kMaxNameLen = 128;
- char debug_name[kMaxNameLen] = "js_to_wasm:";
- AppendSignature(debug_name, kMaxNameLen, sig);
+ auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+ memcpy(debug_name.get(), "js_to_wasm:", 12);
+ AppendSignature(debug_name.get(), kMaxNameLen, sig);
- // Schedule and compile to machine code.
int params = static_cast<int>(sig->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
- &zone, false, params + 1, CallDescriptor::kNoFlags);
+ zone.get(), false, params + 1, CallDescriptor::kNoFlags);
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
- isolate, incoming, &graph, Code::JS_TO_WASM_FUNCTION, debug_name,
- WasmAssemblerOptions());
- Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
- return maybe_code;
- }
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(debug_name, os);
- }
-#endif
-
- if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code, "%s",
- debug_name);
- }
-
- return code;
+ return Pipeline::NewWasmHeapStubCompilationJob(
+ isolate, incoming, std::move(zone), graph, Code::JS_TO_WASM_FUNCTION,
+ std::move(debug_name), WasmAssemblerOptions());
}
-WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
- wasm::FunctionSig* expected_sig,
- bool has_bigint_feature) {
- if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
- auto imported_function = WasmExportedFunction::cast(*target);
- auto func_index = imported_function.function_index();
- auto module = imported_function.instance().module();
+std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
+ Handle<JSReceiver> callable, wasm::FunctionSig* expected_sig,
+ bool has_bigint_feature) {
+ if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
+ auto imported_function = Handle<WasmExportedFunction>::cast(callable);
+ auto func_index = imported_function->function_index();
+ auto module = imported_function->instance().module();
wasm::FunctionSig* imported_sig = module->functions[func_index].sig;
if (*imported_sig != *expected_sig) {
- return WasmImportCallKind::kLinkError;
+ return std::make_pair(WasmImportCallKind::kLinkError, callable);
}
- if (static_cast<uint32_t>(func_index) < module->num_imported_functions) {
- // TODO(wasm): this redirects all imported-reexported functions
- // through the call builtin. Fall through to JS function cases below?
- return WasmImportCallKind::kUseCallBuiltin;
+ if (static_cast<uint32_t>(func_index) >= module->num_imported_functions) {
+ return std::make_pair(WasmImportCallKind::kWasmToWasm, callable);
}
- return WasmImportCallKind::kWasmToWasm;
- }
- if (WasmCapiFunction::IsWasmCapiFunction(*target)) {
- WasmCapiFunction capi_function = WasmCapiFunction::cast(*target);
- if (!capi_function.IsSignatureEqual(expected_sig)) {
- return WasmImportCallKind::kLinkError;
+ Isolate* isolate = callable->GetIsolate();
+ // Resolve the short-cut to the underlying callable and continue.
+ Handle<WasmInstanceObject> instance(imported_function->instance(), isolate);
+ ImportedFunctionEntry entry(instance, func_index);
+ callable = handle(entry.callable(), isolate);
+ }
+ if (WasmJSFunction::IsWasmJSFunction(*callable)) {
+ auto js_function = Handle<WasmJSFunction>::cast(callable);
+ if (!js_function->MatchesSignature(expected_sig)) {
+ return std::make_pair(WasmImportCallKind::kLinkError, callable);
+ }
+ Isolate* isolate = callable->GetIsolate();
+ // Resolve the short-cut to the underlying callable and continue.
+ callable = handle(js_function->GetCallable(), isolate);
+ }
+ if (WasmCapiFunction::IsWasmCapiFunction(*callable)) {
+ auto capi_function = Handle<WasmCapiFunction>::cast(callable);
+ if (!capi_function->IsSignatureEqual(expected_sig)) {
+ return std::make_pair(WasmImportCallKind::kLinkError, callable);
}
- return WasmImportCallKind::kWasmToCapi;
+ return std::make_pair(WasmImportCallKind::kWasmToCapi, callable);
}
// Assuming we are calling to JS, check whether this would be a runtime error.
if (!wasm::IsJSCompatibleSignature(expected_sig, has_bigint_feature)) {
- return WasmImportCallKind::kRuntimeTypeError;
+ return std::make_pair(WasmImportCallKind::kRuntimeTypeError, callable);
}
// For JavaScript calls, determine whether the target has an arity match
// and whether it has a sloppy receiver.
- if (target->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(target);
+ if (callable->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
SharedFunctionInfo shared = function->shared();
// Check for math intrinsics.
@@ -6004,7 +6151,9 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
wasm::FunctionSig* sig = wasm::WasmOpcodes::Signature(wasm::kExpr##name); \
if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name); \
DCHECK_NOT_NULL(sig); \
- if (*expected_sig == *sig) return WasmImportCallKind::k##name; \
+ if (*expected_sig == *sig) { \
+ return std::make_pair(WasmImportCallKind::k##name, callable); \
+ } \
}
#define COMPARE_SIG_FOR_BUILTIN_F64(name) \
case Builtins::kMath##name: \
@@ -6051,19 +6200,23 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
if (IsClassConstructor(shared.kind())) {
// Class constructor will throw anyway.
- return WasmImportCallKind::kUseCallBuiltin;
+ return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
bool sloppy = is_sloppy(shared.language_mode()) && !shared.native();
if (shared.internal_formal_parameter_count() ==
expected_sig->parameter_count()) {
- return sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
- : WasmImportCallKind::kJSFunctionArityMatch;
+ return std::make_pair(
+ sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
+ : WasmImportCallKind::kJSFunctionArityMatch,
+ callable);
}
- return sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy
- : WasmImportCallKind::kJSFunctionArityMismatch;
+ return std::make_pair(
+ sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy
+ : WasmImportCallKind::kJSFunctionArityMismatch,
+ callable);
}
// Unknown case. Use the call builtin.
- return WasmImportCallKind::kUseCallBuiltin;
+ return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
@@ -6103,10 +6256,9 @@ wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
#undef CASE
}
-wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
- wasm::NativeModule* native_module,
- WasmImportCallKind kind,
- wasm::FunctionSig* sig) {
+wasm::WasmCompilationResult CompileWasmMathIntrinsic(
+ wasm::WasmEngine* wasm_engine, WasmImportCallKind kind,
+ wasm::FunctionSig* sig) {
DCHECK_EQ(1, sig->return_count());
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
@@ -6125,7 +6277,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
InstructionSelector::AlignmentRequirements()));
wasm::CompilationEnv env(
- native_module->module(), wasm::UseTrapHandler::kNoTrapHandler,
+ nullptr, wasm::UseTrapHandler::kNoTrapHandler,
wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport,
wasm::kAllWasmFeatures, wasm::LowerSimd::kNoLowerSimd);
@@ -6167,21 +6319,12 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
wasm_engine, call_descriptor, mcgraph, Code::WASM_FUNCTION,
wasm::WasmCode::kFunction, debug_name, WasmStubAssemblerOptions(),
source_positions);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
- result.frame_slot_count, result.tagged_parameter_slots,
- std::move(result.protected_instructions),
- std::move(result.source_positions), wasm::WasmCode::kFunction,
- wasm::ExecutionTier::kNone);
- // TODO(titzer): add counters for math intrinsic code size / allocation
- return native_module->PublishCode(std::move(wasm_code));
+ return result;
}
-wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
- wasm::NativeModule* native_module,
- WasmImportCallKind kind,
- wasm::FunctionSig* sig,
- bool source_positions) {
+wasm::WasmCompilationResult CompileWasmImportCallWrapper(
+ wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
+ WasmImportCallKind kind, wasm::FunctionSig* sig, bool source_positions) {
DCHECK_NE(WasmImportCallKind::kLinkError, kind);
DCHECK_NE(WasmImportCallKind::kWasmToWasm, kind);
@@ -6189,7 +6332,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
if (FLAG_wasm_math_intrinsics &&
kind >= WasmImportCallKind::kFirstMathIntrinsic &&
kind <= WasmImportCallKind::kLastMathIntrinsic) {
- return CompileWasmMathIntrinsic(wasm_engine, native_module, kind, sig);
+ return CompileWasmMathIntrinsic(wasm_engine, kind, sig);
}
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
@@ -6214,7 +6357,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_position_table,
StubCallMode::kCallWasmRuntimeStub,
- native_module->enabled_features());
+ env->enabled_features);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmImportCallWrapper(kind);
@@ -6232,13 +6375,8 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
wasm_engine, incoming, &jsgraph, Code::WASM_TO_JS_FUNCTION,
wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(),
source_position_table);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
- result.frame_slot_count, result.tagged_parameter_slots,
- std::move(result.protected_instructions),
- std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
- wasm::ExecutionTier::kNone);
- return native_module->PublishCode(std::move(wasm_code));
+ result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
+ return result;
}
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
@@ -6290,9 +6428,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
wasm::WasmCode::kWasmToCapiWrapper, debug_name,
WasmStubAssemblerOptions(), source_positions);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
- result.frame_slot_count, result.tagged_parameter_slots,
- std::move(result.protected_instructions),
+ wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
std::move(result.source_positions), wasm::WasmCode::kWasmToCapiWrapper,
wasm::ExecutionTier::kNone);
return native_module->PublishCode(std::move(wasm_code));
@@ -6338,24 +6475,26 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
wasm::WasmCode::kInterpreterEntry, func_name.begin(),
WasmStubAssemblerOptions());
result.result_tier = wasm::ExecutionTier::kInterpreter;
+ result.kind = wasm::WasmCompilationResult::kInterpreterEntry;
return result;
}
MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
- Zone zone(isolate->allocator(), ZONE_NAME);
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
+ std::unique_ptr<Zone> zone =
+ base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+ Graph* graph = new (zone.get()) Graph(zone.get());
+ CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
- &zone, MachineType::PointerRepresentation(),
+ zone.get(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
- WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
StubCallMode::kCallCodeObject,
wasm::WasmFeaturesFromIsolate(isolate));
builder.set_control_ptr(&control);
@@ -6363,29 +6502,36 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
builder.BuildCWasmEntry();
// Schedule and compile to machine code.
- CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
- &zone, false, CWasmEntryParameters::kNumParameters + 1,
- CallDescriptor::kNoFlags);
+ MachineType sig_types[] = {MachineType::Pointer(), // return
+ MachineType::Pointer(), // target
+ MachineType::AnyTagged(), // object_ref
+ MachineType::Pointer(), // argv
+ MachineType::Pointer()}; // c_entry_fp
+ MachineSignature incoming_sig(1, 4, sig_types);
+ // Traps need the root register, for TailCallRuntimeWithCEntry to call
+ // Runtime::kThrowWasmError.
+ bool initialize_root_flag = true;
+ CallDescriptor* incoming = Linkage::GetSimplifiedCDescriptor(
+ zone.get(), &incoming_sig, initialize_root_flag);
// Build a name in the form "c-wasm-entry:<params>:<returns>".
static constexpr size_t kMaxNameLen = 128;
- char debug_name[kMaxNameLen] = "c-wasm-entry:";
- AppendSignature(debug_name, kMaxNameLen, sig);
-
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
- isolate, incoming, &graph, Code::C_WASM_ENTRY, debug_name,
- AssemblerOptions::Default(isolate));
- Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
- return maybe_code;
- }
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(debug_name, os);
- }
-#endif
+ auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+ memcpy(debug_name.get(), "c-wasm-entry:", 14);
+ AppendSignature(debug_name.get(), kMaxNameLen, sig);
+
+ // Run the compilation job synchronously.
+ std::unique_ptr<OptimizedCompilationJob> job(
+ Pipeline::NewWasmHeapStubCompilationJob(
+ isolate, incoming, std::move(zone), graph, Code::C_WASM_ENTRY,
+ std::move(debug_name), AssemblerOptions::Default(isolate)));
+
+ if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
+ job->ExecuteJob() == CompilationJob::FAILED ||
+ job->FinalizeJob(isolate) == CompilationJob::FAILED) {
+ return {};
+ }
+ Handle<Code> code = job->compilation_info()->code();
return code;
}
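For reference, CompileCWasmEntry now drives the compiler through an explicit OptimizedCompilationJob instead of a one-shot GenerateCodeForWasmHeapStub call. A minimal sketch of that synchronous Prepare/Execute/Finalize pattern, factored into a hypothetical helper (the helper name is not part of this commit):

// Sketch only: RunJobSynchronously is an illustrative helper, not patch code.
MaybeHandle<Code> RunJobSynchronously(Isolate* isolate,
                                      OptimizedCompilationJob* job) {
  if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
      job->ExecuteJob() == CompilationJob::FAILED ||
      job->FinalizeJob(isolate) == CompilationJob::FAILED) {
    return {};  // An empty MaybeHandle signals failure to the caller.
  }
  return job->compilation_info()->code();
}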
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 460d0d2f1b..315733c396 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_WASM_COMPILER_H_
#include <memory>
+#include <utility>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
@@ -20,6 +21,7 @@
namespace v8 {
namespace internal {
struct AssemblerOptions;
+class OptimizedCompilationJob;
namespace compiler {
// Forward declarations for some compiler data structures.
@@ -103,13 +105,23 @@ enum class WasmImportCallKind : uint8_t {
kUseCallBuiltin
};
-V8_EXPORT_PRIVATE WasmImportCallKind
-GetWasmImportCallKind(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
+// TODO(wasm): There should be only one import kind for sloppy and strict in
+// order to reduce wrapper cache misses. The mode can be checked at runtime
+// instead.
+constexpr WasmImportCallKind kDefaultImportCallKind =
+ WasmImportCallKind::kJSFunctionArityMatchSloppy;
+
+// Resolves which import call wrapper is required for the given JS callable.
+// Returns the kind of wrapper needed and the ultimate target callable. Note that
+// some callables (e.g. a {WasmExportedFunction} or {WasmJSFunction}) just wrap
+// another target, which is why the ultimate target is returned as well.
+V8_EXPORT_PRIVATE std::pair<WasmImportCallKind, Handle<JSReceiver>>
+ResolveWasmImportCall(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
bool has_bigint_feature);
// Compiles an import call wrapper, which allows WASM to call imports.
-V8_EXPORT_PRIVATE wasm::WasmCode* CompileWasmImportCallWrapper(
- wasm::WasmEngine*, wasm::NativeModule*, WasmImportCallKind,
+V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmImportCallWrapper(
+ wasm::WasmEngine*, wasm::CompilationEnv* env, WasmImportCallKind,
wasm::FunctionSig*, bool source_positions);
// Compiles a host call wrapper, which allows WASM to call host functions.
@@ -117,11 +129,9 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine*,
wasm::NativeModule*,
wasm::FunctionSig*, Address address);
-// Creates a code object calling a wasm function with the given signature,
-// callable from JS.
-V8_EXPORT_PRIVATE MaybeHandle<Code> CompileJSToWasmWrapper(Isolate*,
- wasm::FunctionSig*,
- bool is_import);
+// Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
+std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
+ Isolate* isolate, wasm::FunctionSig* sig, bool is_import);
// Compiles a stub that redirects a call to a wasm function to the wasm
// interpreter. It's ABI compatible with the compiled wasm function.
@@ -133,13 +143,13 @@ enum CWasmEntryParameters {
kCodeEntry,
kObjectRef,
kArgumentsBuffer,
+ kCEntryFp,
// marker:
kNumParameters
};
-// Compiles a stub with JS linkage, taking parameters as described by
-// {CWasmEntryParameters}. It loads the wasm parameters from the argument
-// buffer and calls the wasm function given as first parameter.
+// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
+// which knows how to feed it its parameters.
MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
// Values from the instance object are cached between WASM-level function calls.
@@ -280,9 +290,9 @@ class WasmGraphBuilder {
Node* GetGlobal(uint32_t index);
Node* SetGlobal(uint32_t index, Node* val);
- Node* GetTable(uint32_t table_index, Node* index,
+ Node* TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position);
- Node* SetTable(uint32_t table_index, Node* index, Node* val,
+ Node* TableSet(uint32_t table_index, Node* index, Node* val,
wasm::WasmCodePosition position);
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
@@ -377,6 +387,7 @@ class WasmGraphBuilder {
Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
uint32_t alignment, uint32_t offset,
wasm::WasmCodePosition position);
+ Node* AtomicFence();
// Returns a pointer to the dropped_data_segments array. Traps if the data
// segment is active or has been dropped.
@@ -395,7 +406,7 @@ class WasmGraphBuilder {
Node* TableInit(uint32_t table_index, uint32_t elem_segment_index, Node* dst,
Node* src, Node* size, wasm::WasmCodePosition position);
Node* ElemDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
- Node* TableCopy(uint32_t table_src_index, uint32_t table_dst_index, Node* dst,
+ Node* TableCopy(uint32_t table_dst_index, uint32_t table_src_index, Node* dst,
Node* src, Node* size, wasm::WasmCodePosition position);
Node* TableGrow(uint32_t table_index, Node* value, Node* delta);
Node* TableSize(uint32_t table_index);
@@ -485,10 +496,10 @@ class WasmGraphBuilder {
Node* BuildCallNode(wasm::FunctionSig* sig, Node** args,
wasm::WasmCodePosition position, Node* instance_node,
const Operator* op);
- // Special implementation for CallIndirect for table 0.
- Node* BuildIndirectCall(uint32_t sig_index, Node** args, Node*** rets,
- wasm::WasmCodePosition position,
- IsReturnCall continuation);
+ // Helper function for {BuildIndirectCall}.
+ void LoadIndirectFunctionTable(uint32_t table_index, Node** ift_size,
+ Node** ift_sig_ids, Node** ift_targets,
+ Node** ift_instances);
Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index, Node** args,
Node*** rets, wasm::WasmCodePosition position,
IsReturnCall continuation);
@@ -591,8 +602,6 @@ class WasmGraphBuilder {
return buf;
}
- Node* BuildLoadBuiltinFromInstance(int builtin_index);
-
//-----------------------------------------------------------------------
// Operations involving the CEntry, a dependency we want to remove
// to get off the GC heap.
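As a usage sketch (not taken from this commit), a caller of the pair-returning ResolveWasmImportCall might unpack the resolved kind and the ultimate target as follows; the variable names and the has_bigint_feature flag are assumptions:

// Illustrative caller; identifiers other than the declared API are assumed.
std::pair<compiler::WasmImportCallKind, Handle<JSReceiver>> resolved =
    compiler::ResolveWasmImportCall(callable, expected_sig, has_bigint_feature);
compiler::WasmImportCallKind kind = resolved.first;
Handle<JSReceiver> target = resolved.second;  // May differ from {callable}.
if (kind == compiler::WasmImportCallKind::kWasmToWasm) {
  // The import is itself an exported wasm function: call {target} directly
  // instead of compiling a wrapper around {callable}.
}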
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index a29c596909..6656ab608d 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -48,6 +48,10 @@
#include "src/utils/utils.h"
#include "src/wasm/wasm-engine.h"
+#ifdef V8_USE_PERFETTO
+#include "perfetto/tracing.h"
+#endif // V8_USE_PERFETTO
+
#ifdef V8_INTL_SUPPORT
#include "unicode/locid.h"
#endif // V8_INTL_SUPPORT
@@ -247,15 +251,7 @@ namespace tracing {
namespace {
-// String options that can be used to initialize TraceOptions.
-const char kRecordUntilFull[] = "record-until-full";
-const char kRecordContinuously[] = "record-continuously";
-const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
-
-const char kRecordModeParam[] = "record_mode";
-const char kEnableSystraceParam[] = "enable_systrace";
-const char kEnableArgumentFilterParam[] = "enable_argument_filter";
-const char kIncludedCategoriesParam[] = "included_categories";
+static constexpr char kIncludedCategoriesParam[] = "included_categories";
class TraceConfigParser {
public:
@@ -273,30 +269,11 @@ class TraceConfigParser {
Local<Value> result = JSON::Parse(context, source).ToLocalChecked();
Local<v8::Object> trace_config_object = Local<v8::Object>::Cast(result);
- trace_config->SetTraceRecordMode(
- GetTraceRecordMode(isolate, context, trace_config_object));
- if (GetBoolean(isolate, context, trace_config_object,
- kEnableSystraceParam)) {
- trace_config->EnableSystrace();
- }
- if (GetBoolean(isolate, context, trace_config_object,
- kEnableArgumentFilterParam)) {
- trace_config->EnableArgumentFilter();
- }
UpdateIncludedCategoriesList(isolate, context, trace_config_object,
trace_config);
}
private:
- static bool GetBoolean(v8::Isolate* isolate, Local<Context> context,
- Local<v8::Object> object, const char* property) {
- Local<Value> value = GetValue(isolate, context, object, property);
- if (value->IsNumber()) {
- return value->BooleanValue(isolate);
- }
- return false;
- }
-
static int UpdateIncludedCategoriesList(
v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object,
platform::tracing::TraceConfig* trace_config) {
@@ -316,23 +293,6 @@ class TraceConfigParser {
}
return 0;
}
-
- static platform::tracing::TraceRecordMode GetTraceRecordMode(
- v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object) {
- Local<Value> value = GetValue(isolate, context, object, kRecordModeParam);
- if (value->IsString()) {
- Local<String> v8_string = value->ToString(context).ToLocalChecked();
- String::Utf8Value str(isolate, v8_string);
- if (strcmp(kRecordUntilFull, *str) == 0) {
- return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
- } else if (strcmp(kRecordContinuously, *str) == 0) {
- return platform::tracing::TraceRecordMode::RECORD_CONTINUOUSLY;
- } else if (strcmp(kRecordAsMuchAsPossible, *str) == 0) {
- return platform::tracing::TraceRecordMode::RECORD_AS_MUCH_AS_POSSIBLE;
- }
- }
- return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
- }
};
} // namespace
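With the record-mode, systrace and argument-filter handling removed, the JSON trace config that d8 accepts reduces to a category list. A minimal illustrative config (the categories are an example; "disabled-by-default-v8.wasm" corresponds to the TRACE_DISABLED_BY_DEFAULT("v8.wasm") events seen earlier in this diff):

{
  "included_categories": ["v8", "disabled-by-default-v8.wasm"]
}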
@@ -1927,7 +1887,7 @@ static void PrintNonErrorsMessageCallback(Local<Message> message,
auto ToCString = [](const v8::String::Utf8Value& value) {
return *value ? *value : "<string conversion failed>";
};
- Isolate* isolate = Isolate::GetCurrent();
+ Isolate* isolate = message->GetIsolate();
v8::String::Utf8Value msg(isolate, message->Get());
const char* msg_string = ToCString(msg);
// Print (filename):(line number): (message).
@@ -2001,20 +1961,20 @@ int LineFromOffset(Local<debug::Script> script, int offset) {
return location.GetLineNumber();
}
-void WriteLcovDataForRange(std::vector<uint32_t>& lines, int start_line,
+void WriteLcovDataForRange(std::vector<uint32_t>* lines, int start_line,
int end_line, uint32_t count) {
// Ensure space in the array.
- lines.resize(std::max(static_cast<size_t>(end_line + 1), lines.size()), 0);
+ lines->resize(std::max(static_cast<size_t>(end_line + 1), lines->size()), 0);
// Boundary lines could be shared between two functions with different
// invocation counts. Take the maximum.
- lines[start_line] = std::max(lines[start_line], count);
- lines[end_line] = std::max(lines[end_line], count);
+ (*lines)[start_line] = std::max((*lines)[start_line], count);
+ (*lines)[end_line] = std::max((*lines)[end_line], count);
// Invocation counts for non-boundary lines are overwritten.
- for (int k = start_line + 1; k < end_line; k++) lines[k] = count;
+ for (int k = start_line + 1; k < end_line; k++) (*lines)[k] = count;
}
void WriteLcovDataForNamedRange(std::ostream& sink,
- std::vector<uint32_t>& lines,
+ std::vector<uint32_t>* lines,
const std::string& name, int start_line,
int end_line, uint32_t count) {
WriteLcovDataForRange(lines, start_line, end_line, count);
@@ -2064,7 +2024,7 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
name_stream << start.GetColumnNumber() << ">";
}
- WriteLcovDataForNamedRange(sink, lines, name_stream.str(), start_line,
+ WriteLcovDataForNamedRange(sink, &lines, name_stream.str(), start_line,
end_line, count);
}
@@ -2074,7 +2034,7 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
int start_line = LineFromOffset(script, block_data.StartOffset());
int end_line = LineFromOffset(script, block_data.EndOffset() - 1);
uint32_t count = block_data.Count();
- WriteLcovDataForRange(lines, start_line, end_line, count);
+ WriteLcovDataForRange(&lines, start_line, end_line, count);
}
}
// Write per-line coverage. LCOV uses 1-based line numbers.
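For context, the per-line counts assembled above are written out as LCOV records; an illustrative fragment (file path and counts invented) looks like:

SF:/tmp/example.js
DA:3,5
DA:4,0
end_of_record

Each DA line pairs a 1-based source line with its execution count, which is why boundary lines shared by two functions take the maximum of the two counts.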
@@ -3350,24 +3310,25 @@ int Shell::Main(int argc, char* argv[]) {
std::unique_ptr<platform::tracing::TracingController> tracing;
std::ofstream trace_file;
-#ifdef V8_USE_PERFETTO
- std::ofstream perfetto_trace_file;
-#endif // V8_USE_PERFETTO
if (options.trace_enabled && !i::FLAG_verify_predictable) {
tracing = base::make_unique<platform::tracing::TracingController>();
-
trace_file.open(options.trace_path ? options.trace_path : "v8_trace.json");
DCHECK(trace_file.good());
+
+#ifdef V8_USE_PERFETTO
+ // Set up the in-process backend that the tracing controller will connect
+ // to.
+ perfetto::TracingInitArgs init_args;
+ init_args.backends = perfetto::BackendType::kInProcessBackend;
+ perfetto::Tracing::Initialize(init_args);
+
+ tracing->InitializeForPerfetto(&trace_file);
+#else
platform::tracing::TraceBuffer* trace_buffer =
platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
platform::tracing::TraceBuffer::kRingBufferChunks,
platform::tracing::TraceWriter::CreateJSONTraceWriter(trace_file));
tracing->Initialize(trace_buffer);
-
-#ifdef V8_USE_PERFETTO
- perfetto_trace_file.open("v8_perfetto_trace.json");
- DCHECK(trace_file.good());
- tracing->InitializeForPerfetto(&perfetto_trace_file);
#endif // V8_USE_PERFETTO
}
diff --git a/deps/v8/src/date/OWNERS b/deps/v8/src/date/OWNERS
index fc4aa8d5ac..6edeeae0ea 100644
--- a/deps/v8/src/date/OWNERS
+++ b/deps/v8/src/date/OWNERS
@@ -1,3 +1,6 @@
ishell@chromium.org
jshin@chromium.org
ulan@chromium.org
+verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index 46b472480d..220aa1ce26 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
bmeurer@chromium.org
jgruber@chromium.org
mvstanton@chromium.org
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 4021cd5038..5337f98db9 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -68,7 +68,8 @@ bool CompareCoverageBlock(const CoverageBlock& a, const CoverageBlock& b) {
return a.start < b.start;
}
-void SortBlockData(std::vector<CoverageBlock>& v) {
+void SortBlockData(
+ std::vector<CoverageBlock>& v) { // NOLINT(runtime/references)
// Sort according to the block nesting structure.
std::sort(v.begin(), v.end(), CompareCoverageBlock);
}
@@ -534,9 +535,9 @@ std::unique_ptr<Coverage> Coverage::Collect(
->feedback_vectors_for_profiling_tools()
->IsArrayList());
DCHECK_EQ(v8::debug::CoverageMode::kBestEffort, collectionMode);
- HeapIterator heap_iterator(isolate->heap());
- for (HeapObject current_obj = heap_iterator.next();
- !current_obj.is_null(); current_obj = heap_iterator.next()) {
+ HeapObjectIterator heap_iterator(isolate->heap());
+ for (HeapObject current_obj = heap_iterator.Next();
+ !current_obj.is_null(); current_obj = heap_iterator.Next()) {
if (!current_obj.IsJSFunction()) continue;
JSFunction func = JSFunction::cast(current_obj);
SharedFunctionInfo shared = func.shared();
@@ -678,9 +679,9 @@ void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
std::vector<Handle<JSFunction>> funcs_needing_feedback_vector;
{
- HeapIterator heap_iterator(isolate->heap());
- for (HeapObject o = heap_iterator.next(); !o.is_null();
- o = heap_iterator.next()) {
+ HeapObjectIterator heap_iterator(isolate->heap());
+ for (HeapObject o = heap_iterator.Next(); !o.is_null();
+ o = heap_iterator.Next()) {
if (o.IsJSFunction()) {
JSFunction func = JSFunction::cast(o);
if (func.has_closure_feedback_cell_array()) {
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 65e62f2aac..0d8a7b2c7e 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -51,7 +51,7 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
}
MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
- StackFrame::Id frame_id,
+ StackFrameId frame_id,
int inlined_jsframe_index,
Handle<String> source,
bool throw_on_side_effect) {
@@ -312,6 +312,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ObjectValuesSkipFastPath) \
V(ObjectGetOwnPropertyNames) \
V(ObjectGetOwnPropertyNamesTryFast) \
+ V(ObjectIsExtensible) \
V(RegExpInitializeAndCompile) \
V(StackGuard) \
V(StringAdd) \
@@ -771,6 +772,8 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kStrictPoisonPillThrower:
case Builtins::kAllocateInYoungGeneration:
case Builtins::kAllocateInOldGeneration:
+ case Builtins::kAllocateRegularInYoungGeneration:
+ case Builtins::kAllocateRegularInOldGeneration:
return DebugInfo::kHasNoSideEffect;
// Set builtins.
@@ -904,7 +907,7 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
switch (callee) {
// Transitively called Builtins:
case Builtins::kAbort:
- case Builtins::kAbortJS:
+ case Builtins::kAbortCSAAssert:
case Builtins::kAdaptorWithBuiltinExitFrame:
case Builtins::kArrayConstructorImpl:
case Builtins::kArrayEveryLoopContinuation:
@@ -959,6 +962,8 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
case Builtins::kOrdinaryToPrimitive_String:
case Builtins::kParseInt:
case Builtins::kProxyHasProperty:
+ case Builtins::kProxyIsExtensible:
+ case Builtins::kProxyGetPrototypeOf:
case Builtins::kRecordWrite:
case Builtins::kStringAdd_CheckNone:
case Builtins::kStringEqual:
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 50817691d7..7819892050 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -7,8 +7,11 @@
#include <vector>
+#include "src/common/globals.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
+#include "src/debug/debug.h"
+#include "src/execution/frames.h"
#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string-table.h"
@@ -28,7 +31,7 @@ class DebugEvaluate : public AllStatic {
// - Parameters and stack-allocated locals need to be materialized. Altered
// values need to be written back to the stack afterwards.
// - The arguments object needs to be materialized.
- static MaybeHandle<Object> Local(Isolate* isolate, StackFrame::Id frame_id,
+ static MaybeHandle<Object> Local(Isolate* isolate, StackFrameId frame_id,
int inlined_jsframe_index,
Handle<String> source,
bool throw_on_side_effect);
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index a6ee31738d..4fe062b277 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -52,10 +52,13 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
}
}
-// NOLINTNEXTLINE
-FrameInspector::~FrameInspector() {
- // Destructor needs to be defined in the .cc file, because it instantiates
- // std::unique_ptr destructors but the types are not known in the header.
+// Destructor needs to be defined in the .cc file, because it instantiates
+// std::unique_ptr destructors but the types are not known in the header.
+FrameInspector::~FrameInspector() = default;
+
+JavaScriptFrame* FrameInspector::javascript_frame() {
+ return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
+ : JavaScriptFrame::cast(frame_);
}
int FrameInspector::GetParametersCount() {
@@ -90,8 +93,10 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
+ RequiresBrandCheckFlag requires_brand_check;
return ScopeInfo::ContextSlotIndex(*info, *parameter_name, &mode, &init_flag,
- &maybe_assigned_flag) != -1;
+ &maybe_assigned_flag,
+ &requires_brand_check) != -1;
}
RedirectActiveFunctions::RedirectActiveFunctions(SharedFunctionInfo shared,
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 5ee4f8b61f..274d10030a 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -6,7 +6,6 @@
#define V8_DEBUG_DEBUG_FRAMES_H_
#include "src/deoptimizer/deoptimizer.h"
-#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/execution/v8threads.h"
#include "src/objects/objects.h"
@@ -15,12 +14,15 @@
namespace v8 {
namespace internal {
+class JavaScriptFrame;
+class StandardFrame;
+
class FrameInspector {
public:
FrameInspector(StandardFrame* frame, int inlined_frame_index,
Isolate* isolate);
- ~FrameInspector(); // NOLINT (modernize-use-equals-default)
+ ~FrameInspector();
int GetParametersCount();
Handle<JSFunction> GetFunction() const { return function_; }
@@ -37,10 +39,7 @@ class FrameInspector {
bool IsWasm();
bool IsJavaScript();
- inline JavaScriptFrame* javascript_frame() {
- return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
- : JavaScriptFrame::cast(frame_);
- }
+ JavaScriptFrame* javascript_frame();
int inlined_frame_index() const { return inlined_frame_index_; }
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 79222371f9..59bc6d0863 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -164,8 +164,9 @@ class WasmScript : public Script {
uint32_t GetFunctionHash(int function_index);
};
-V8_EXPORT_PRIVATE void GetLoadedScripts(Isolate* isolate,
- PersistentValueVector<Script>& scripts);
+V8_EXPORT_PRIVATE void GetLoadedScripts(
+ Isolate* isolate,
+ PersistentValueVector<Script>& scripts); // NOLINT(runtime/references)
MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* isolate,
Local<String> source);
diff --git a/deps/v8/src/debug/debug-scope-iterator.h b/deps/v8/src/debug/debug-scope-iterator.h
index 3859e8cb41..44d6c49860 100644
--- a/deps/v8/src/debug/debug-scope-iterator.h
+++ b/deps/v8/src/debug/debug-scope-iterator.h
@@ -8,7 +8,6 @@
#include "src/debug/debug-frames.h"
#include "src/debug/debug-interface.h"
#include "src/debug/debug-scopes.h"
-#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 3a58f0b458..1091e3a819 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -13,7 +13,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/objects/js-generator-inl.h"
-#include "src/objects/module.h"
+#include "src/objects/source-text-module.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
@@ -538,11 +538,8 @@ void ScopeIterator::RetrieveScopeChain(DeclarationScope* scope) {
int beg_pos = inner_scope->start_position();
int end_pos = inner_scope->end_position();
DCHECK((beg_pos >= 0 && end_pos >= 0) || inner_scope->is_hidden());
- if (beg_pos <= position && position < end_pos) {
- // Don't walk into inner functions.
- if (!inner_scope->is_function_scope()) {
- current = inner_scope;
- }
+ if (beg_pos < position && position < end_pos) {
+ current = inner_scope;
break;
}
}
@@ -576,7 +573,7 @@ void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
int count_index = scope_info->ModuleVariableCountIndex();
int module_variable_count = Smi::cast(scope_info->get(count_index)).value();
- Handle<Module> module(context_->module(), isolate_);
+ Handle<SourceTextModule> module(context_->module(), isolate_);
for (int i = 0; i < module_variable_count; ++i) {
int index;
@@ -587,7 +584,8 @@ void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
if (ScopeInfo::VariableIsSynthetic(raw_name)) continue;
name = handle(raw_name, isolate_);
}
- Handle<Object> value = Module::LoadVariable(isolate_, module, index);
+ Handle<Object> value =
+ SourceTextModule::LoadVariable(isolate_, module, index);
// Reflect variables under TDZ as undeclared in scope object.
if (value->IsTheHole(isolate_)) continue;
@@ -614,15 +612,32 @@ bool ScopeIterator::VisitContextLocals(const Visitor& visitor,
bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
if (mode == Mode::STACK && current_scope_->is_declaration_scope() &&
current_scope_->AsDeclarationScope()->has_this_declaration()) {
- Handle<Object> receiver = frame_inspector_ == nullptr
- ? handle(generator_->receiver(), isolate_)
- : frame_inspector_->GetReceiver();
+ // TODO(bmeurer): We should refactor the general variable lookup
+ // around "this", since the current way is rather hacky when the
+ // receiver is context-allocated.
+ auto this_var = current_scope_->AsDeclarationScope()->receiver();
+ Handle<Object> receiver =
+ this_var->location() == VariableLocation::CONTEXT
+ ? handle(context_->get(this_var->index()), isolate_)
+ : frame_inspector_ == nullptr
+ ? handle(generator_->receiver(), isolate_)
+ : frame_inspector_->GetReceiver();
if (receiver->IsOptimizedOut(isolate_) || receiver->IsTheHole(isolate_)) {
receiver = isolate_->factory()->undefined_value();
}
if (visitor(isolate_->factory()->this_string(), receiver)) return true;
}
+ if (current_scope_->is_function_scope()) {
+ Variable* function_var =
+ current_scope_->AsDeclarationScope()->function_var();
+ if (function_var != nullptr) {
+ Handle<JSFunction> function = frame_inspector_->GetFunction();
+ Handle<String> name = function_var->name();
+ if (visitor(name, function)) return true;
+ }
+ }
+
for (Variable* var : *current_scope_->locals()) {
DCHECK(!var->is_this());
if (ScopeInfo::VariableIsSynthetic(*var->name())) continue;
@@ -696,8 +711,8 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
case VariableLocation::MODULE: {
if (mode == Mode::STACK) continue;
// if (var->IsExport()) continue;
- Handle<Module> module(context_->module(), isolate_);
- value = Module::LoadVariable(isolate_, module, var->index());
+ Handle<SourceTextModule> module(context_->module(), isolate_);
+ value = SourceTextModule::LoadVariable(isolate_, module, var->index());
// Reflect variables under TDZ as undeclared in scope object.
if (value->IsTheHole(isolate_)) continue;
break;
@@ -837,8 +852,8 @@ bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
case VariableLocation::MODULE:
if (!var->IsExport()) return false;
- Handle<Module> module(context_->module(), isolate_);
- Module::StoreVariable(module, var->index(), new_value);
+ Handle<SourceTextModule> module(context_->module(), isolate_);
+ SourceTextModule::StoreVariable(module, var->index(), new_value);
return true;
}
UNREACHABLE();
@@ -869,9 +884,10 @@ bool ScopeIterator::SetContextVariableValue(Handle<String> variable_name,
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
- int slot_index =
- ScopeInfo::ContextSlotIndex(context_->scope_info(), *variable_name, &mode,
- &flag, &maybe_assigned_flag);
+ RequiresBrandCheckFlag requires_brand_check;
+ int slot_index = ScopeInfo::ContextSlotIndex(
+ context_->scope_info(), *variable_name, &mode, &flag,
+ &maybe_assigned_flag, &requires_brand_check);
if (slot_index < 0) return false;
context_->set(slot_index, *new_value);
@@ -889,13 +905,13 @@ bool ScopeIterator::SetModuleVariableValue(Handle<String> variable_name,
*variable_name, &mode, &init_flag, &maybe_assigned_flag);
// Setting imports is currently not supported.
- if (ModuleDescriptor::GetCellIndexKind(cell_index) !=
- ModuleDescriptor::kExport) {
+ if (SourceTextModuleDescriptor::GetCellIndexKind(cell_index) !=
+ SourceTextModuleDescriptor::kExport) {
return false;
}
- Handle<Module> module(context_->module(), isolate_);
- Module::StoreVariable(module, cell_index, new_value);
+ Handle<SourceTextModule> module(context_->module(), isolate_);
+ SourceTextModule::StoreVariable(module, cell_index, new_value);
return true;
}
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 6e1c8b27bc..5c3361619a 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -8,11 +8,11 @@
#include <vector>
#include "src/debug/debug-frames.h"
-#include "src/execution/frames.h"
namespace v8 {
namespace internal {
+class JavaScriptFrame;
class ParseInfo;
// Iterate over the actual scopes visible from a stack frame or from a closure.
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 2c2c438727..a0c6fa967c 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -98,9 +98,10 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
+ RequiresBrandCheckFlag requires_brand_check;
int slot_index = ScopeInfo::ContextSlotIndex(
context->scope_info(), ReadOnlyRoots(isolate_->heap()).this_string(),
- &mode, &flag, &maybe_assigned_flag);
+ &mode, &flag, &maybe_assigned_flag, &requires_brand_check);
if (slot_index < 0) return v8::MaybeLocal<v8::Value>();
Handle<Object> value = handle(context->get(slot_index), isolate_);
if (value->IsTheHole(isolate_)) return v8::MaybeLocal<v8::Value>();
@@ -166,7 +167,7 @@ DebugStackTraceIterator::GetScopeIterator() const {
bool DebugStackTraceIterator::Restart() {
DCHECK(!Done());
if (iterator_.is_wasm()) return false;
- return !LiveEdit::RestartFrame(iterator_.javascript_frame());
+ return LiveEdit::RestartFrame(iterator_.javascript_frame());
}
v8::MaybeLocal<v8::Value> DebugStackTraceIterator::Evaluate(
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 5cc200d552..9b5200e343 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -15,6 +15,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
#include "src/common/globals.h"
+#include "src/common/message-template.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/liveedit.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -22,7 +23,6 @@
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/execution/v8threads.h"
#include "src/handles/global-handles.h"
#include "src/heap/heap-inl.h" // For NextDebuggingId.
@@ -336,7 +336,7 @@ void DebugFeatureTracker::Track(DebugFeatureTracker::Feature feature) {
// Threading support.
void Debug::ThreadInit() {
- thread_local_.break_frame_id_ = StackFrame::NO_ID;
+ thread_local_.break_frame_id_ = StackFrameId::NO_ID;
thread_local_.last_step_action_ = StepNone;
thread_local_.last_statement_position_ = kNoSourcePosition;
thread_local_.last_frame_count_ = -1;
@@ -960,9 +960,9 @@ void Debug::PrepareStep(StepAction step_action) {
// any. The debug frame will only be present if execution was stopped due to
// hitting a break point. In other situations (e.g. unhandled exception) the
// debug frame is not present.
- StackFrame::Id frame_id = break_frame_id();
+ StackFrameId frame_id = break_frame_id();
// If there is no JavaScript stack don't do anything.
- if (frame_id == StackFrame::NO_ID) return;
+ if (frame_id == StackFrameId::NO_ID) return;
feature_tracker()->Track(DebugFeatureTracker::kStepping);
@@ -1226,9 +1226,9 @@ void Debug::InstallDebugBreakTrampoline() {
std::vector<Handle<JSFunction>> needs_compile;
std::vector<Handle<AccessorPair>> needs_instantiate;
{
- HeapIterator iterator(isolate_->heap());
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(isolate_->heap());
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (needs_to_clear_ic && obj.IsFeedbackVector()) {
FeedbackVector::cast(obj).ClearSlots(isolate_);
continue;
@@ -1649,7 +1649,7 @@ void Debug::ScheduleFrameRestart(StackFrame* frame) {
// Reset break frame ID to the frame below the restarted frame.
StackTraceFrameIterator it(isolate_);
- thread_local_.break_frame_id_ = StackFrame::NO_ID;
+ thread_local_.break_frame_id_ = StackFrameId::NO_ID;
for (StackTraceFrameIterator it(isolate_); !it.done(); it.Advance()) {
if (it.frame()->fp() > thread_local_.restart_fp_) {
thread_local_.break_frame_id_ = it.frame()->id();
@@ -1913,7 +1913,7 @@ void Debug::ProcessCompileEvent(bool has_compile_error, Handle<Script> script) {
int Debug::CurrentFrameCount() {
StackTraceFrameIterator it(isolate_);
- if (break_frame_id() != StackFrame::NO_ID) {
+ if (break_frame_id() != StackFrameId::NO_ID) {
// Skip to break frame.
DCHECK(in_debug_scope());
while (!it.done() && it.frame()->id() != break_frame_id()) it.Advance();
@@ -2058,7 +2058,7 @@ DebugScope::DebugScope(Debug* debug)
StackTraceFrameIterator it(isolate());
bool has_frames = !it.done();
debug_->thread_local_.break_frame_id_ =
- has_frames ? it.frame()->id() : StackFrame::NO_ID;
+ has_frames ? it.frame()->id() : StackFrameId::NO_ID;
debug_->UpdateState();
}
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 8ac77e259d..684397400a 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -11,7 +11,7 @@
#include "src/common/globals.h"
#include "src/debug/debug-interface.h"
#include "src/debug/interface-types.h"
-#include "src/execution/frames.h"
+#include "src/execution/interrupts-scope.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/objects/debug-objects.h"
@@ -22,7 +22,10 @@ namespace internal {
// Forward declarations.
class AbstractCode;
class DebugScope;
+class InterpretedFrame;
+class JavaScriptFrame;
class JSGeneratorObject;
+class StackFrame;
// Step actions. NOTE: These values are in macros.py as well.
enum StepAction : int8_t {
@@ -341,7 +344,7 @@ class V8_EXPORT_PRIVATE Debug {
void set_break_points_active(bool v) { break_points_active_ = v; }
bool break_points_active() const { return break_points_active_; }
- StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
+ StackFrameId break_frame_id() { return thread_local_.break_frame_id_; }
Handle<Object> return_value_handle();
Object return_value() { return thread_local_.return_value_; }
@@ -497,7 +500,7 @@ class V8_EXPORT_PRIVATE Debug {
base::AtomicWord current_debug_scope_;
// Frame id for the frame of the current break.
- StackFrame::Id break_frame_id_;
+ StackFrameId break_frame_id_;
// Step action for last step performed.
StepAction last_step_action_;
@@ -564,7 +567,7 @@ class DebugScope {
Debug* debug_;
DebugScope* prev_; // Previous scope if entered recursively.
- StackFrame::Id break_frame_id_; // Previous break frame id.
+ StackFrameId break_frame_id_; // Previous break frame id.
PostponeInterruptsScope no_interrupts_;
};
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 9144e03be4..6e8a349a7d 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -11,6 +11,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
#include "src/codegen/source-position-table.h"
+#include "src/common/globals.h"
#include "src/debug/debug-interface.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
@@ -826,9 +827,10 @@ class FunctionDataMap : public ThreadVisitor {
void Fill(Isolate* isolate, Address* restart_frame_fp) {
{
- HeapIterator iterator(isolate->heap(), HeapIterator::kFilterUnreachable);
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(isolate->heap(),
+ HeapObjectIterator::kFilterUnreachable);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsSharedFunctionInfo()) {
SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
FunctionData* data = nullptr;
@@ -851,7 +853,7 @@ class FunctionDataMap : public ThreadVisitor {
}
}
FunctionData::StackPosition stack_position =
- isolate->debug()->break_frame_id() == StackFrame::NO_ID
+ isolate->debug()->break_frame_id() == StackFrameId::NO_ID
? FunctionData::PATCHABLE
: FunctionData::ABOVE_BREAK_FRAME;
for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
@@ -936,10 +938,10 @@ class FunctionDataMap : public ThreadVisitor {
std::map<FuncId, FunctionData> map_;
};
-bool CanPatchScript(const LiteralMap& changed, Handle<Script> script,
- Handle<Script> new_script,
- FunctionDataMap& function_data_map,
- debug::LiveEditResult* result) {
+bool CanPatchScript(
+ const LiteralMap& changed, Handle<Script> script, Handle<Script> new_script,
+ FunctionDataMap& function_data_map, // NOLINT(runtime/references)
+ debug::LiveEditResult* result) {
debug::LiveEditResult::Status status = debug::LiveEditResult::OK;
for (const auto& mapping : changed) {
FunctionData* data = nullptr;
@@ -970,9 +972,10 @@ bool CanPatchScript(const LiteralMap& changed, Handle<Script> script,
return true;
}
-bool CanRestartFrame(Isolate* isolate, Address fp,
- FunctionDataMap& function_data_map,
- const LiteralMap& changed, debug::LiveEditResult* result) {
+bool CanRestartFrame(
+ Isolate* isolate, Address fp,
+ FunctionDataMap& function_data_map, // NOLINT(runtime/references)
+ const LiteralMap& changed, debug::LiveEditResult* result) {
DCHECK_GT(fp, 0);
StackFrame* restart_frame = nullptr;
StackFrameIterator it(isolate);
@@ -1118,13 +1121,10 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
UpdatePositions(isolate, sfi, diffs);
sfi->set_script(*new_script);
- if (sfi->HasUncompiledData()) {
- sfi->uncompiled_data().set_function_literal_id(
- mapping.second->function_literal_id());
- }
+ sfi->set_function_literal_id(mapping.second->function_literal_id());
new_script->shared_function_infos().Set(
mapping.second->function_literal_id(), HeapObjectReference::Weak(*sfi));
- DCHECK_EQ(sfi->FunctionLiteralId(isolate),
+ DCHECK_EQ(sfi->function_literal_id(),
mapping.second->function_literal_id());
// Save the new start_position -> id mapping, so that we can recover it when
@@ -1222,7 +1222,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
std::set<int> start_positions;
for (SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
DCHECK_EQ(sfi.script(), *new_script);
- DCHECK_EQ(sfi.FunctionLiteralId(isolate), it.CurrentIndex());
+ DCHECK_EQ(sfi.function_literal_id(), it.CurrentIndex());
// Don't check the start position of the top-level function, as it can
// overlap with a function in the script.
if (sfi.is_toplevel()) {
@@ -1242,7 +1242,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
SharedFunctionInfo::cast(constants.get(i));
DCHECK_EQ(inner_sfi.script(), *new_script);
DCHECK_EQ(inner_sfi, new_script->shared_function_infos()
- .Get(inner_sfi.FunctionLiteralId(isolate))
+ .Get(inner_sfi.function_literal_id())
->GetHeapObject());
}
}
@@ -1273,8 +1273,8 @@ void LiveEdit::InitializeThreadLocal(Debug* debug) {
bool LiveEdit::RestartFrame(JavaScriptFrame* frame) {
if (!LiveEdit::kFrameDropperSupported) return false;
Isolate* isolate = frame->isolate();
- StackFrame::Id break_frame_id = isolate->debug()->break_frame_id();
- bool break_frame_found = break_frame_id == StackFrame::NO_ID;
+ StackFrameId break_frame_id = isolate->debug()->break_frame_id();
+ bool break_frame_found = break_frame_id == StackFrameId::NO_ID;
for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
StackFrame* current = it.frame();
break_frame_found = break_frame_found || break_frame_id == current->id();
diff --git a/deps/v8/src/deoptimizer/OWNERS b/deps/v8/src/deoptimizer/OWNERS
index 97a194d7cf..632607a952 100644
--- a/deps/v8/src/deoptimizer/OWNERS
+++ b/deps/v8/src/deoptimizer/OWNERS
@@ -3,3 +3,5 @@ jarin@chromium.org
mstarzinger@chromium.org
sigurds@chromium.org
tebbi@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index 4004dfd90f..89e9988f9e 100644
--- a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -231,7 +231,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pop(lr);
__ Jump(scratch);
}
- __ stop("Unreachable.");
+ __ stop();
}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
diff --git a/deps/v8/src/deoptimizer/deoptimize-reason.h b/deps/v8/src/deoptimizer/deoptimize-reason.h
index d556e89927..ac2273460a 100644
--- a/deps/v8/src/deoptimizer/deoptimize-reason.h
+++ b/deps/v8/src/deoptimizer/deoptimize-reason.h
@@ -12,6 +12,7 @@ namespace internal {
#define DEOPTIMIZE_REASON_LIST(V) \
V(ArrayBufferWasDetached, "array buffer was detached") \
+ V(BigIntTooBig, "BigInt too big") \
V(CowArrayElementsChanged, "copy-on-write array's elements changed") \
V(CouldNotGrowElements, "failed to grow elements store") \
V(DeoptimizeNow, "%_DeoptimizeNow") \
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 91556cfbdc..47c40d373e 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -159,7 +159,7 @@ Code Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_.IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
Isolate* isolate = isolate_;
- Context native_context = function_.context().native_context();
+ NativeContext native_context = function_.context().native_context();
Object element = native_context.DeoptimizedCodeListHead();
while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
@@ -270,10 +270,10 @@ class ActivationsFinder : public ThreadVisitor {
// Move marked code from the optimized code list to the deoptimized code list,
// and replace pc on the stack for codes marked for deoptimization.
-void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
+void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
DisallowHeapAllocation no_allocation;
- Isolate* isolate = context.GetIsolate();
+ Isolate* isolate = native_context.GetIsolate();
Code topmost_optimized_code;
bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
@@ -315,7 +315,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
// Move marked code from the optimized code list to the deoptimized code list.
// Walk over all optimized code objects in this native context.
Code prev;
- Object element = context.OptimizedCodeListHead();
+ Object element = native_context.OptimizedCodeListHead();
while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
CHECK_EQ(code.kind(), Code::OPTIMIZED_FUNCTION);
@@ -329,12 +329,12 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
prev.set_next_code_link(next);
} else {
// There was no previous node, the next node is the new head.
- context.SetOptimizedCodeListHead(next);
+ native_context.SetOptimizedCodeListHead(next);
}
// Move the code to the _deoptimized_ code list.
- code.set_next_code_link(context.DeoptimizedCodeListHead());
- context.SetDeoptimizedCodeListHead(code);
+ code.set_next_code_link(native_context.DeoptimizedCodeListHead());
+ native_context.SetDeoptimizedCodeListHead(code);
} else {
// Not marked; preserve this element.
prev = code;
@@ -373,7 +373,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
// For all contexts, mark all code, then deoptimize.
Object context = isolate->heap()->native_contexts_list();
while (!context.IsUndefined(isolate)) {
- Context native_context = Context::cast(context);
+ NativeContext native_context = NativeContext::cast(context);
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context.next_context_link();
@@ -393,15 +393,15 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
// For all contexts, deoptimize code already marked.
Object context = isolate->heap()->native_contexts_list();
while (!context.IsUndefined(isolate)) {
- Context native_context = Context::cast(context);
+ NativeContext native_context = NativeContext::cast(context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context.next_context_link();
}
}
-void Deoptimizer::MarkAllCodeForContext(Context context) {
- Object element = context.OptimizedCodeListHead();
- Isolate* isolate = context.GetIsolate();
+void Deoptimizer::MarkAllCodeForContext(NativeContext native_context) {
+ Object element = native_context.OptimizedCodeListHead();
+ Isolate* isolate = native_context.GetIsolate();
while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
CHECK_EQ(code.kind(), Code::OPTIMIZED_FUNCTION);
@@ -590,7 +590,7 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
// Count all entries in the deoptimizing code list of every context.
Object context = isolate->heap()->native_contexts_list();
while (!context.IsUndefined(isolate)) {
- Context native_context = Context::cast(context);
+ NativeContext native_context = NativeContext::cast(context);
Object element = native_context.DeoptimizedCodeListHead();
while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
@@ -633,6 +633,12 @@ bool ShouldPadArguments(int arg_count) {
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
+ // When we call this function, the return address of the previous frame has
+ // been removed from the stack by GenerateDeoptimizationEntries() so the stack
+ // is not iterable by the SafeStackFrameIterator.
+#if V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK
+ DCHECK_EQ(0, isolate()->isolate_data()->stack_is_iterable());
+#endif
base::ElapsedTimer timer;
// Determine basic deoptimization information. The optimized frame is
@@ -662,10 +668,6 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
- StackGuard* const stack_guard = isolate()->stack_guard();
- CHECK_GT(static_cast<uintptr_t>(caller_frame_top_),
- stack_guard->real_jslimit());
-
if (trace_scope_ != nullptr) {
timer.Start();
PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
@@ -723,7 +725,6 @@ void Deoptimizer::DoComputeOutputFrames() {
// Translate each output frame.
int frame_index = 0; // output_frame_index
- size_t total_output_frame_size = 0;
for (size_t i = 0; i < count; ++i, ++frame_index) {
// Read the ast node id, function, and frame height for this output frame.
TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
@@ -759,7 +760,6 @@ void Deoptimizer::DoComputeOutputFrames() {
FATAL("invalid frame");
break;
}
- total_output_frame_size += output_[frame_index]->GetFrameSize();
}
FrameDescription* topmost = output_[count - 1];
@@ -779,14 +779,6 @@ void Deoptimizer::DoComputeOutputFrames() {
bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
caller_frame_top_, ms);
}
-
- // TODO(jgruber,neis):
- // The situation that the output frames do not fit into the stack space should
- // be prevented by an optimized function's initial stack check: That check
- // must fail if the (interpreter) frames generated upon deoptimization of the
- // function would overflow the stack.
- CHECK_GT(static_cast<uintptr_t>(caller_frame_top_) - total_output_frame_size,
- stack_guard->real_jslimit());
}
void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
@@ -1364,21 +1356,25 @@ bool Deoptimizer::BuiltinContinuationModeIsWithCatch(
UNREACHABLE();
}
-StackFrame::Type Deoptimizer::BuiltinContinuationModeToFrameType(
- BuiltinContinuationMode mode) {
+namespace {
+
+StackFrame::Type BuiltinContinuationModeToFrameType(
+ Deoptimizer::BuiltinContinuationMode mode) {
switch (mode) {
- case BuiltinContinuationMode::STUB:
+ case Deoptimizer::BuiltinContinuationMode::STUB:
return StackFrame::BUILTIN_CONTINUATION;
- case BuiltinContinuationMode::JAVASCRIPT:
+ case Deoptimizer::BuiltinContinuationMode::JAVASCRIPT:
return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION;
- case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
+ case Deoptimizer::BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
- case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
+ case Deoptimizer::BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
}
UNREACHABLE();
}
+} // namespace
+
Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
BuiltinContinuationMode mode, bool must_handle_result) {
switch (mode) {
@@ -1438,7 +1434,7 @@ Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
// +-------------------------+
// | context |<- this non-standard context slot contains
// +-------------------------+ the context, even for non-JS builtins.
-// | builtin address |
+// | builtin index |
// +-------------------------+
// | builtin input GPR reg0 |<- populated from deopt FrameState using
// +-------------------------+ the builtin's CallInterfaceDescriptor
@@ -1663,7 +1659,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
"builtin JavaScript context\n");
// The builtin to continue to.
- frame_writer.PushRawObject(builtin, "builtin address\n");
+ frame_writer.PushRawObject(Smi::FromInt(builtin.builtin_index()),
+ "builtin index\n");
for (int i = 0; i < allocatable_register_count; ++i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -3037,12 +3034,7 @@ int TranslatedState::CreateNextTranslatedValue(
return translated_value.GetChildrenCount();
}
intptr_t value = registers->GetRegister(input_reg);
-#if defined(V8_COMPRESS_POINTERS)
- Address uncompressed_value = DecompressTaggedAny(
- isolate()->isolate_root(), static_cast<uint32_t>(value));
-#else
- Address uncompressed_value = value;
-#endif
+ Address uncompressed_value = DecompressIfNeeded(value);
if (trace_file != nullptr) {
PrintF(trace_file, V8PRIxPTR_FMT " ; %s ", uncompressed_value,
converter.NameOfCPURegister(input_reg));
@@ -3165,12 +3157,7 @@ int TranslatedState::CreateNextTranslatedValue(
int slot_offset =
OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
-#if defined(V8_COMPRESS_POINTERS)
- Address uncompressed_value = DecompressTaggedAny(
- isolate()->isolate_root(), static_cast<uint32_t>(value));
-#else
- Address uncompressed_value = value;
-#endif
+ Address uncompressed_value = DecompressIfNeeded(value);
if (trace_file != nullptr) {
PrintF(trace_file, V8PRIxPTR_FMT " ; [fp %c %3d] ",
uncompressed_value, slot_offset < 0 ? '-' : '+',
@@ -3284,6 +3271,15 @@ int TranslatedState::CreateNextTranslatedValue(
FATAL("We should never get here - unexpected deopt info.");
}
+Address TranslatedState::DecompressIfNeeded(intptr_t value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return DecompressTaggedAny(isolate()->isolate_root(),
+ static_cast<uint32_t>(value));
+ } else {
+ return value;
+ }
+}
+
TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationData data =
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 67e3e54405..a2471247ef 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -16,7 +16,6 @@
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/diagnostics/code-tracer.h"
#include "src/execution/frame-constants.h"
-#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/shared-function-info.h"
@@ -28,8 +27,10 @@ namespace v8 {
namespace internal {
class FrameDescription;
+class JavaScriptFrame;
class TranslationIterator;
class DeoptimizedFrameInfo;
+class TranslatedFrame;
class TranslatedState;
class RegisterValues;
class MacroAssembler;
@@ -340,6 +341,7 @@ class TranslatedState {
int CreateNextTranslatedValue(int frame_index, TranslationIterator* iterator,
FixedArray literal_array, Address fp,
RegisterValues* registers, FILE* trace_file);
+ Address DecompressIfNeeded(intptr_t value);
Address ComputeArgumentsPosition(Address input_frame_pointer,
CreateArgumentsType type, int* length);
void CreateArgumentsElementsTranslatedValues(int frame_index,
@@ -499,6 +501,13 @@ class Deoptimizer : public Malloced {
static const int kMaxNumberOfEntries = 16384;
+ enum class BuiltinContinuationMode {
+ STUB,
+ JAVASCRIPT,
+ JAVASCRIPT_WITH_CATCH,
+ JAVASCRIPT_HANDLE_EXCEPTION
+ };
+
private:
friend class FrameWriter;
void QueueValueForMaterialization(Address output_address, Object obj,
@@ -521,16 +530,8 @@ class Deoptimizer : public Malloced {
void DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
int frame_index);
- enum class BuiltinContinuationMode {
- STUB,
- JAVASCRIPT,
- JAVASCRIPT_WITH_CATCH,
- JAVASCRIPT_HANDLE_EXCEPTION
- };
static bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode);
static bool BuiltinContinuationModeIsJavaScript(BuiltinContinuationMode mode);
- static StackFrame::Type BuiltinContinuationModeToFrameType(
- BuiltinContinuationMode mode);
static Builtins::Name TrampolineForBuiltinContinuation(
BuiltinContinuationMode mode, bool must_handle_result);
@@ -549,11 +550,8 @@ class Deoptimizer : public Malloced {
Isolate* isolate,
DeoptimizeKind kind);
- // Marks all the code in the given context for deoptimization.
- static void MarkAllCodeForContext(Context native_context);
-
- // Deoptimizes all code marked in the given context.
- static void DeoptimizeMarkedCodeForContext(Context native_context);
+ static void MarkAllCodeForContext(NativeContext native_context);
+ static void DeoptimizeMarkedCodeForContext(NativeContext native_context);
// Some architectures need to push padding together with the TOS register
// in order to maintain stack alignment.
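
Two header-hygiene changes above: deoptimizer.h stops including src/execution/frames.h and forward-declares JavaScriptFrame and TranslatedFrame instead, and BuiltinContinuationMode moves from the private section to the public one so code outside the class can name the type. A sketch of the forward-declaration idiom, with illustrative class names only:

// Header side: only pointers/references to the type appear, so a forward
// declaration is enough and the heavy include moves to the .cc file.
class JavaScriptFrame;  // was: #include "src/execution/frames.h"

class TranslatedStateSketch {
 public:
  explicit TranslatedStateSketch(const JavaScriptFrame* frame);  // pointer: OK
};

// Implementation side: the complete type is needed, so the .cc file keeps
// #include "src/execution/frames.h".
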
diff --git a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
index 6b01449ba7..f40ff562be 100644
--- a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
@@ -116,6 +116,12 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ __ mov_b(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate), edx),
+ Immediate(0));
+
// Remove the return address and the double registers.
__ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
@@ -194,6 +200,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ push(Operand(esi, offset));
}
+ __ mov_b(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate), edx),
+ Immediate(1));
+
// Restore the registers from the stack.
__ popad();
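
Here and in the x64 entry further down, the deoptimizer brackets the window in which it rewrites the frame layout with a byte flag reached through ExternalReference::stack_is_iterable_address, so a sampling CPU profiler can skip unwinding while the stack has no valid return address. A conceptual C++ sketch of that handshake, assuming a profiler-side check that is not shown in this diff:

#include <atomic>
#include <cstdint>

// One byte per isolate: 1 = frames are walkable, 0 = being rewritten.
std::atomic<uint8_t> stack_is_iterable{1};

void RewriteFramesForDeopt() {
  stack_is_iterable.store(0, std::memory_order_relaxed);  // like Immediate(0)
  // ... pop the return address and materialize the output frames ...
  stack_is_iterable.store(1, std::memory_order_relaxed);  // like Immediate(1)
}

void MaybeSampleStack() {
  // The profiler drops the sample rather than walking an unwalkable stack.
  if (stack_is_iterable.load(std::memory_order_relaxed) == 0) return;
  // ... unwind via saved fp / return addresses ...
}
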
diff --git a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
index a56501660b..07bc9a511b 100644
--- a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
@@ -225,7 +225,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pop(at); // Get continuation, leave pc on stack.
__ pop(ra);
__ Jump(at);
- __ stop("Unreachable.");
+ __ stop();
}
// Maximum size of a table entry generated below.
diff --git a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
index 6869199f1b..f85659c4ab 100644
--- a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
@@ -226,7 +226,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pop(at); // Get continuation, leave pc on stack.
__ pop(ra);
__ Jump(at);
- __ stop("Unreachable.");
+ __ stop();
}
// Maximum size of a table entry generated below.
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index 268660c2ef..41616a5af2 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -56,11 +56,13 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
-
- __ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ StoreP(fp, MemOperand(ip));
-
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ mov(scratch, Operand(ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, isolate)));
+ __ StoreP(fp, MemOperand(scratch));
+ }
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
@@ -210,20 +212,28 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ push(r9);
// Restore the registers from the last output frame.
- DCHECK(!(ip.bit() & restored_regs));
- __ mr(ip, r5);
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ LoadP(ToRegister(i), MemOperand(ip, offset));
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ DCHECK(!(scratch.bit() & restored_regs));
+ __ mr(scratch, r5);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ LoadP(ToRegister(i), MemOperand(scratch, offset));
+ }
}
}
- __ pop(ip); // get continuation, leave pc on stack
- __ pop(r0);
- __ mtlr(r0);
- __ Jump(ip);
- __ stop("Unreachable.");
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ pop(scratch); // get continuation, leave pc on stack
+ __ pop(r0);
+ __ mtlr(r0);
+ __ Jump(scratch);
+ }
+ __ stop();
}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
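
The PPC entry stops using ip directly and instead acquires temporaries through UseScratchRegisterScope, which hands out a register from the assembler's available set and returns it when the scope ends. A minimal RAII sketch of that idea, not the real V8 class:

#include <cstdint>

struct Register { int code; };

class ScratchScope {
 public:
  explicit ScratchScope(uint32_t* available)
      : available_(available), saved_(*available) {}
  ~ScratchScope() { *available_ = saved_; }  // release everything acquired here

  Register Acquire() {
    // Hand out the lowest-numbered register still available; the mask must be
    // non-empty (the real class asserts this).
    int code = 0;
    while (((*available_ >> code) & 1u) == 0) ++code;
    *available_ &= ~(1u << code);
    return Register{code};
  }

 private:
  uint32_t* available_;  // bitmask of registers the assembler may clobber
  uint32_t saved_;
};
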
diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index db2330a8e8..6da740b0e5 100644
--- a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -228,7 +228,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pop(ip); // get continuation, leave pc on stack
__ pop(r14);
__ Jump(ip);
- __ stop("Unreachable.");
+ __ stop();
}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index 7654dc965f..cfdd6c9ef1 100644
--- a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -129,6 +129,12 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ popq(Operand(rbx, dst_offset));
}
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ __ movb(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate)),
+ Immediate(0));
+
// Remove the return address from the stack.
__ addq(rsp, Immediate(kPCOnStackSize));
@@ -218,6 +224,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ popq(r);
}
+ __ movb(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate)),
+ Immediate(1));
+
// Return to the continuation point.
__ ret(0);
}
diff --git a/deps/v8/src/diagnostics/DEPS b/deps/v8/src/diagnostics/DEPS
new file mode 100644
index 0000000000..27782f9ecd
--- /dev/null
+++ b/deps/v8/src/diagnostics/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+src/compiler/node.h",
+]
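
The new DEPS file adds a checkdeps include_rules entry for src/diagnostics/: a "+path" rule permits files in this directory to include that path even though it belongs to another component. The include it unlocks appears in objects-printer.cc further down:

// Permitted for files under src/diagnostics/ by the "+src/compiler/node.h" rule.
#include "src/compiler/node.h"
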
diff --git a/deps/v8/src/diagnostics/eh-frame.cc b/deps/v8/src/diagnostics/eh-frame.cc
index e19e09f332..45d693a476 100644
--- a/deps/v8/src/diagnostics/eh-frame.cc
+++ b/deps/v8/src/diagnostics/eh-frame.cc
@@ -582,7 +582,8 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
void EhFrameDisassembler::DisassembleToStream(std::ostream& stream) { // NOLINT
// The encoded CIE size does not include the size field itself.
const int cie_size =
- ReadUnalignedUInt32(reinterpret_cast<Address>(start_)) + kInt32Size;
+ base::ReadUnalignedValue<uint32_t>(reinterpret_cast<Address>(start_)) +
+ kInt32Size;
const int fde_offset = cie_size;
const byte* cie_directives_start =
@@ -597,12 +598,13 @@ void EhFrameDisassembler::DisassembleToStream(std::ostream& stream) { // NOLINT
reinterpret_cast<Address>(start_) + fde_offset +
EhFrameConstants::kProcedureAddressOffsetInFde;
int32_t procedure_offset =
- ReadUnalignedValue<int32_t>(procedure_offset_address);
+ base::ReadUnalignedValue<int32_t>(procedure_offset_address);
Address procedure_size_address = reinterpret_cast<Address>(start_) +
fde_offset +
EhFrameConstants::kProcedureSizeOffsetInFde;
- uint32_t procedure_size = ReadUnalignedUInt32(procedure_size_address);
+ uint32_t procedure_size =
+ base::ReadUnalignedValue<uint32_t>(procedure_size_address);
const byte* fde_start = start_ + fde_offset;
stream << reinterpret_cast<const void*>(fde_start) << " .eh_frame: FDE\n"
diff --git a/deps/v8/src/diagnostics/eh-frame.h b/deps/v8/src/diagnostics/eh-frame.h
index 8b78b04b16..a9d76a2743 100644
--- a/deps/v8/src/diagnostics/eh-frame.h
+++ b/deps/v8/src/diagnostics/eh-frame.h
@@ -6,9 +6,9 @@
#define V8_DIAGNOSTICS_EH_FRAME_H_
#include "src/base/compiler-specific.h"
+#include "src/base/memory.h"
#include "src/codegen/register-arch.h"
#include "src/common/globals.h"
-#include "src/common/v8memory.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -144,11 +144,11 @@ class V8_EXPORT_PRIVATE EhFrameWriter {
}
void PatchInt32(int base_offset, uint32_t value) {
DCHECK_EQ(
- ReadUnalignedUInt32(reinterpret_cast<Address>(eh_frame_buffer_.data()) +
- base_offset),
+ base::ReadUnalignedValue<uint32_t>(
+ reinterpret_cast<Address>(eh_frame_buffer_.data()) + base_offset),
kInt32Placeholder);
DCHECK_LT(base_offset + kInt32Size, eh_frame_offset());
- WriteUnalignedUInt32(
+ base::WriteUnalignedValue<uint32_t>(
reinterpret_cast<Address>(eh_frame_buffer_.data()) + base_offset,
value);
}
@@ -216,7 +216,9 @@ class V8_EXPORT_PRIVATE EhFrameIterator {
void SkipCie() {
DCHECK_EQ(next_, start_);
- next_ += ReadUnalignedUInt32(reinterpret_cast<Address>(next_)) + kInt32Size;
+ next_ +=
+ base::ReadUnalignedValue<uint32_t>(reinterpret_cast<Address>(next_)) +
+ kInt32Size;
}
void SkipToFdeDirectives() {
@@ -267,7 +269,7 @@ class V8_EXPORT_PRIVATE EhFrameIterator {
T GetNextValue() {
T result;
DCHECK_LE(next_ + sizeof(result), end_);
- result = ReadUnalignedValue<T>(reinterpret_cast<Address>(next_));
+ result = base::ReadUnalignedValue<T>(reinterpret_cast<Address>(next_));
next_ += sizeof(result);
return result;
}
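
The eh-frame code switches from the width-specific ReadUnalignedUInt32/WriteUnalignedUInt32 wrappers to the templated helpers in src/base/memory.h (the old src/common/v8memory.h include goes away). Such helpers are conventionally built on memcpy so the access is well defined regardless of alignment; a minimal sketch, not the exact V8 implementation:

#include <cstdint>
#include <cstring>

using Address = uintptr_t;

template <typename T>
inline T ReadUnalignedValue(Address p) {
  T result;
  std::memcpy(&result, reinterpret_cast<const void*>(p), sizeof(result));
  return result;
}

template <typename T>
inline void WriteUnalignedValue(Address p, T value) {
  std::memcpy(reinterpret_cast<void*>(p), &value, sizeof(value));
}

// Usage mirroring the call sites above:
//   uint32_t cie_size = ReadUnalignedValue<uint32_t>(start_address);
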
diff --git a/deps/v8/src/diagnostics/gdb-jit.cc b/deps/v8/src/diagnostics/gdb-jit.cc
index 70fd9fb06d..e1290bae4e 100644
--- a/deps/v8/src/diagnostics/gdb-jit.cc
+++ b/deps/v8/src/diagnostics/gdb-jit.cc
@@ -4,6 +4,7 @@
#include "src/diagnostics/gdb-jit.h"
+#include <map>
#include <memory>
#include <vector>
@@ -17,7 +18,6 @@
#include "src/objects/objects.h"
#include "src/snapshot/natives.h"
#include "src/utils/ostreams.h"
-#include "src/utils/splay-tree-inl.h"
#include "src/utils/vector.h"
#include "src/zone/zone-chunk-list.h"
@@ -1822,23 +1822,24 @@ struct AddressRange {
Address end;
};
-struct SplayTreeConfig {
+struct AddressRangeLess {
+ bool operator()(const AddressRange& a, const AddressRange& b) const {
+ if (a.start == b.start) return a.end < b.end;
+ return a.start < b.start;
+ }
+};
+
+struct CodeMapConfig {
using Key = AddressRange;
using Value = JITCodeEntry*;
- static const AddressRange kNoKey;
- static Value NoValue() { return nullptr; }
- static int Compare(const AddressRange& a, const AddressRange& b) {
- // ptrdiff_t probably doesn't fit in an int.
- if (a.start < b.start) return -1;
- if (a.start == b.start) return 0;
- return 1;
- }
+ using Less = AddressRangeLess;
};
-const AddressRange SplayTreeConfig::kNoKey = {0, 0};
-using CodeMap = SplayTree<SplayTreeConfig>;
+using CodeMap =
+ std::map<CodeMapConfig::Key, CodeMapConfig::Value, CodeMapConfig::Less>;
static CodeMap* GetCodeMap() {
+ // TODO(jgruber): Don't leak.
static CodeMap* code_map = nullptr;
if (code_map == nullptr) code_map = new CodeMap();
return code_map;
@@ -1909,37 +1910,49 @@ static void AddUnwindInfo(CodeDescription* desc) {
static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
-// Remove entries from the splay tree that intersect the given address range,
+// Remove entries from the map that intersect the given address range,
// and deregister them from GDB.
static void RemoveJITCodeEntries(CodeMap* map, const AddressRange& range) {
DCHECK(range.start < range.end);
- CodeMap::Locator cur;
- if (map->FindGreatestLessThan(range, &cur) || map->FindLeast(&cur)) {
- // Skip entries that are entirely less than the range of interest.
- while (cur.key().end <= range.start) {
- // CodeMap::FindLeastGreaterThan succeeds for entries whose key is greater
- // than _or equal to_ the given key, so we have to advance our key to get
- // the next one.
- AddressRange new_key;
- new_key.start = cur.key().end;
- new_key.end = 0;
- if (!map->FindLeastGreaterThan(new_key, &cur)) return;
- }
- // Evict intersecting ranges.
- while (cur.key().start < range.end) {
- AddressRange old_range = cur.key();
- JITCodeEntry* old_entry = cur.value();
- UnregisterCodeEntry(old_entry);
- DestroyCodeEntry(old_entry);
+ if (map->empty()) return;
+
+ // Find the first overlapping entry.
- CHECK(map->Remove(old_range));
- if (!map->FindLeastGreaterThan(old_range, &cur)) return;
+ // If successful, points to the first element not less than `range`. The
+ // returned iterator has the key in `first` and the value in `second`.
+ auto it = map->lower_bound(range);
+ auto start_it = it;
+
+ if (it == map->end()) {
+ start_it = map->begin();
+ } else if (it != map->begin()) {
+ for (--it; it != map->begin(); --it) {
+ if ((*it).first.end <= range.start) break;
+ start_it = it;
}
}
+
+ DCHECK(start_it != map->end());
+
+ // Find the first non-overlapping entry after `range`.
+
+ const auto end_it = map->lower_bound({range.end, 0});
+
+ // Evict intersecting ranges.
+
+ if (std::distance(start_it, end_it) < 1) return; // No overlapping entries.
+
+ for (auto it = start_it; it != end_it; it++) {
+ JITCodeEntry* old_entry = (*it).second;
+ UnregisterCodeEntry(old_entry);
+ DestroyCodeEntry(old_entry);
+ }
+
+ map->erase(start_it, end_it);
}
-// Insert the entry into the splay tree and register it with GDB.
+// Insert the entry into the map and register it with GDB.
static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
JITCodeEntry* entry, bool dump_if_enabled,
const char* name_hint) {
@@ -1956,9 +1969,9 @@ static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
}
#endif
- CodeMap::Locator cur;
- CHECK(map->Insert(range, &cur));
- cur.set_value(entry);
+ auto result = map->emplace(range, entry);
+ DCHECK(result.second); // Insertion happened.
+ USE(result);
RegisterCodeEntry(entry);
}
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index 534898fdf5..e8c9588bbe 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -184,6 +184,24 @@ void InstructionTable::AddJumpConditionalShort() {
}
}
+namespace {
+int8_t Imm8(const uint8_t* data) {
+ return *reinterpret_cast<const int8_t*>(data);
+}
+uint8_t Imm8_U(const uint8_t* data) {
+ return *reinterpret_cast<const uint8_t*>(data);
+}
+int16_t Imm16(const uint8_t* data) {
+ return *reinterpret_cast<const int16_t*>(data);
+}
+uint16_t Imm16_U(const uint8_t* data) {
+ return *reinterpret_cast<const uint16_t*>(data);
+}
+int32_t Imm32(const uint8_t* data) {
+ return *reinterpret_cast<const int32_t*>(data);
+}
+} // namespace
+
// The IA32 disassembler implementation.
class DisassemblerIA32 {
public:
@@ -373,8 +391,7 @@ int DisassemblerIA32::PrintRightOperandHelper(
switch (mod) {
case 0:
if (rm == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
- AppendToBuffer("[0x%x]", disp);
+ AppendToBuffer("[0x%x]", Imm32(modrmp + 1));
return 5;
} else if (rm == esp) {
byte sib = *(modrmp + 1);
@@ -384,7 +401,7 @@ int DisassemblerIA32::PrintRightOperandHelper(
AppendToBuffer("[%s]", (this->*register_name)(rm));
return 2;
} else if (base == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+ int32_t disp = Imm32(modrmp + 2);
AppendToBuffer("[%s*%d%s0x%x]", (this->*register_name)(index),
1 << scale, disp < 0 ? "-" : "+",
disp < 0 ? -disp : disp);
@@ -409,8 +426,7 @@ int DisassemblerIA32::PrintRightOperandHelper(
byte sib = *(modrmp + 1);
int scale, index, base;
get_sib(sib, &scale, &index, &base);
- int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<int8_t*>(modrmp + 2);
+ int disp = mod == 2 ? Imm32(modrmp + 2) : Imm8(modrmp + 2);
if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
@@ -422,8 +438,7 @@ int DisassemblerIA32::PrintRightOperandHelper(
return mod == 2 ? 6 : 3;
} else {
// No sib.
- int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<int8_t*>(modrmp + 1);
+ int disp = mod == 2 ? Imm32(modrmp + 1) : Imm8(modrmp + 1);
AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
return mod == 2 ? 5 : 2;
@@ -517,7 +532,7 @@ int DisassemblerIA32::PrintImmediateOp(byte* data) {
AppendToBuffer(",0x%x", *(data + 1 + count));
return 1 + count + 1 /*int8*/;
} else {
- AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
+ AppendToBuffer(",0x%x", Imm32(data + 1 + count));
return 1 + count + 4 /*int32_t*/;
}
}
@@ -557,7 +572,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
AppendToBuffer("%s ", mnem);
int count = PrintRightOperand(data);
if (regop == 0) {
- AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + count));
+ AppendToBuffer(",0x%x", Imm32(data + count));
count += 4;
}
return 1 + count;
@@ -627,7 +642,7 @@ int DisassemblerIA32::JumpShort(byte* data) {
int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
DCHECK_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
+ byte* dest = data + Imm32(data + 2) + 6;
const char* mnem = jump_conditional_mnem[cond];
AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
if (comment != nullptr) {
@@ -775,56 +790,53 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vpblendw %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(current));
+ AppendToBuffer(",%d", Imm8_U(current));
current++;
break;
case 0x0F:
AppendToBuffer("vpalignr %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(current));
+ AppendToBuffer(",%d", Imm8_U(current));
current++;
break;
case 0x14:
AppendToBuffer("vpextrb ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(current));
current++;
break;
case 0x15:
AppendToBuffer("vpextrw ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(current));
current++;
break;
case 0x16:
AppendToBuffer("vpextrd ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(current));
current++;
break;
case 0x20:
AppendToBuffer("vpinsrb %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
case 0x21:
AppendToBuffer("vinsertps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
case 0x22:
AppendToBuffer("vpinsrd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
default:
@@ -872,7 +884,7 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
case 0x70:
AppendToBuffer("vpshuflw %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
case 0x7C:
@@ -933,7 +945,7 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
case 0x70:
AppendToBuffer("vpshufhw %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
case 0x7f:
@@ -1173,7 +1185,7 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
case 0x70:
AppendToBuffer("vpshufd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
case 0x71:
@@ -1197,7 +1209,7 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
@@ -1615,8 +1627,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case MOVE_REG_INSTR: {
- byte* addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+ byte* addr = reinterpret_cast<byte*>(Imm32(data + 1));
AppendToBuffer("mov %s,%s", NameOfCPURegister(*data & 0x07),
NameOfAddress(addr));
data += 5;
@@ -1624,15 +1635,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
+ byte* addr = data + Imm32(data + 1) + 5;
AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
}
case SHORT_IMMEDIATE_INSTR: {
- byte* addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+ byte* addr = reinterpret_cast<byte*>(Imm32(data + 1));
AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
@@ -1656,7 +1666,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (!processed) {
switch (*data) {
case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1));
+ AppendToBuffer("ret 0x%x", Imm16_U(data + 1));
data += 3;
break;
@@ -1670,7 +1680,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x69: {
data++;
data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
- AppendToBuffer(",%d", *reinterpret_cast<int32_t*>(data));
+ AppendToBuffer(",%d", Imm32(data));
data += 4;
} break;
@@ -1860,6 +1870,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
+ } else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xF0) {
+ AppendToBuffer("mfence");
+ data += 3;
} else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xE8) {
AppendToBuffer("lfence");
data += 3;
@@ -1920,8 +1933,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else {
AppendToBuffer("%s ", "mov");
data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm32(data));
data += 4;
}
} break;
@@ -1980,8 +1992,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
AppendToBuffer("cmpw ");
data += PrintRightOperand(data);
- int imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm16(data));
data += 2;
} else if (*data == 0x87) {
data++;
@@ -2005,15 +2016,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
AppendToBuffer("%s ", "mov_w");
data += PrintRightOperand(data);
- int imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm16(data));
data += 2;
} else if (*data == 0xF7) {
data++;
AppendToBuffer("%s ", "test_w");
data += PrintRightOperand(data);
- int imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm16(data));
data += 2;
} else if (*data == 0x0F) {
data++;
@@ -2062,7 +2071,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pblendw %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(data));
+ AppendToBuffer(",%d", Imm8_U(data));
data++;
} else if (*data == 0x0F) {
data++;
@@ -2070,7 +2079,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("palignr %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(data));
+ AppendToBuffer(",%d", Imm8_U(data));
data++;
} else if (*data == 0x14) {
data++;
@@ -2078,8 +2087,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pextrb ");
data += PrintRightOperand(data);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(data));
data++;
} else if (*data == 0x15) {
data++;
@@ -2087,8 +2095,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pextrw ");
data += PrintRightOperand(data);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(data));
data++;
} else if (*data == 0x16) {
data++;
@@ -2096,8 +2103,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pextrd ");
data += PrintRightOperand(data);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(data));
data++;
} else if (*data == 0x17) {
data++;
@@ -2113,7 +2119,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pinsrb %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else if (*data == 0x21) {
data++;
@@ -2121,7 +2127,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("insertps %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else if (*data == 0x22) {
data++;
@@ -2129,7 +2135,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pinsrd %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else {
UnimplementedInstruction();
@@ -2193,7 +2199,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pshufd %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else if (*data == 0x90) {
data++;
@@ -2257,7 +2263,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pinsrw %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else if (*data == 0xE7) {
data++;
@@ -2309,22 +2315,22 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} break;
case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
+ AppendToBuffer("push 0x%x", Imm32(data + 1));
data += 5;
break;
case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ AppendToBuffer("push 0x%x", Imm8(data + 1));
data += 2;
break;
case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
+ AppendToBuffer("test al,0x%x", Imm8_U(data + 1));
data += 2;
break;
case 0xA9:
- AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data + 1));
+ AppendToBuffer("test eax,0x%x", Imm32(data + 1));
data += 5;
break;
@@ -2377,7 +2383,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pshuflw %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else {
const char* mnem = "?";
@@ -2477,7 +2483,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pshufhw %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else if (b2 == 0x7F) {
AppendToBuffer("movdqu ");
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index c5219970cb..dc3b3b8091 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -63,10 +63,11 @@
#include "src/objects/struct-inl.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/transitions-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects-inl.h"
#include "torque-generated/class-verifiers-tq.h"
+#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
@@ -127,13 +128,6 @@ void MaybeObject::VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p) {
}
}
-namespace {
-void VerifyForeignPointer(Isolate* isolate, HeapObject host, Object foreign) {
- host.VerifyPointer(isolate, foreign);
- CHECK(foreign.IsUndefined(isolate) || Foreign::IsNormalized(foreign));
-}
-} // namespace
-
void Smi::SmiVerify(Isolate* isolate) {
CHECK(IsSmi());
CHECK(!IsCallable());
@@ -153,6 +147,10 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
SlicedString::cast(*this).SlicedStringVerify(isolate);
} else if (IsThinString()) {
ThinString::cast(*this).ThinStringVerify(isolate);
+ } else if (IsSeqString()) {
+ SeqString::cast(*this).SeqStringVerify(isolate);
+ } else if (IsExternalString()) {
+ ExternalString::cast(*this).ExternalStringVerify(isolate);
} else {
String::cast(*this).StringVerify(isolate);
}
@@ -293,8 +291,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
JSAsyncGeneratorObject::cast(*this).JSAsyncGeneratorObjectVerify(isolate);
break;
- case JS_VALUE_TYPE:
- JSValue::cast(*this).JSValueVerify(isolate);
+ case JS_PRIMITIVE_WRAPPER_TYPE:
+ JSPrimitiveWrapper::cast(*this).JSPrimitiveWrapperVerify(isolate);
break;
case JS_DATE_TYPE:
JSDate::cast(*this).JSDateVerify(isolate);
@@ -420,6 +418,12 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
SmallOrderedNameDictionary::cast(*this).SmallOrderedNameDictionaryVerify(
isolate);
break;
+ case SOURCE_TEXT_MODULE_TYPE:
+ SourceTextModule::cast(*this).SourceTextModuleVerify(isolate);
+ break;
+ case SYNTHETIC_MODULE_TYPE:
+ SyntheticModule::cast(*this).SyntheticModuleVerify(isolate);
+ break;
case CODE_DATA_CONTAINER_TYPE:
CodeDataContainer::cast(*this).CodeDataContainerVerify(isolate);
break;
@@ -502,6 +506,11 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
CHECK(IsBytecodeArray());
CHECK(constant_pool().IsFixedArray());
VerifyHeapPointer(isolate, constant_pool());
+ CHECK(source_position_table().IsUndefined() ||
+ source_position_table().IsException() ||
+ source_position_table().IsByteArray() ||
+ source_position_table().IsSourcePositionTableWithFrameCache());
+ CHECK(handler_table().IsByteArray());
}
USE_TORQUE_VERIFIER(FreeSpace)
@@ -515,10 +524,13 @@ void FeedbackVector::FeedbackVectorVerify(Isolate* isolate) {
CHECK(code->IsSmi() || code->IsWeakOrCleared());
}
-bool JSObject::ElementsAreSafeToExamine() const {
+USE_TORQUE_VERIFIER(JSReceiver)
+
+bool JSObject::ElementsAreSafeToExamine(Isolate* isolate) const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
- return elements() != GetReadOnlyRoots().one_pointer_filler_map();
+ return elements(isolate) !=
+ GetReadOnlyRoots(isolate).one_pointer_filler_map();
}
namespace {
@@ -624,7 +636,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
- if (ElementsAreSafeToExamine()) {
+ if (ElementsAreSafeToExamine(isolate)) {
CHECK_EQ((map().has_fast_smi_or_object_elements() ||
map().has_frozen_or_sealed_elements() ||
(elements() == GetReadOnlyRoots().empty_fixed_array()) ||
@@ -699,6 +711,8 @@ void EmbedderDataArray::EmbedderDataArrayVerify(Isolate* isolate) {
}
}
+USE_TORQUE_VERIFIER(FixedArrayBase)
+
USE_TORQUE_VERIFIER(FixedArray)
void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
@@ -709,6 +723,8 @@ void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
}
void WeakArrayList::WeakArrayListVerify(Isolate* isolate) {
+ VerifySmiField(kCapacityOffset);
+ VerifySmiField(kLengthOffset);
for (int i = 0; i < length(); i++) {
MaybeObject::VerifyMaybeObjectPointer(isolate, Get(i));
}
@@ -774,24 +790,27 @@ void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::DescriptorArrayVerify(*this, isolate);
for (int i = 0; i < number_of_all_descriptors(); i++) {
- MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToKeyIndex(i)));
- MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToDetailsIndex(i)));
- MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToValueIndex(i)));
+ MaybeObjectSlot slot(GetDescriptorSlot(i));
+ MaybeObject::VerifyMaybeObjectPointer(isolate, *(slot + kEntryKeyIndex));
+ MaybeObject::VerifyMaybeObjectPointer(isolate,
+ *(slot + kEntryDetailsIndex));
+ MaybeObject::VerifyMaybeObjectPointer(isolate, *(slot + kEntryValueIndex));
}
if (number_of_all_descriptors() == 0) {
- Heap* heap = isolate->heap();
- CHECK_EQ(ReadOnlyRoots(heap).empty_descriptor_array(), *this);
+ CHECK_EQ(ReadOnlyRoots(isolate).empty_descriptor_array(), *this);
CHECK_EQ(0, number_of_all_descriptors());
CHECK_EQ(0, number_of_descriptors());
- CHECK_EQ(ReadOnlyRoots(heap).empty_enum_cache(), enum_cache());
+ CHECK_EQ(ReadOnlyRoots(isolate).empty_enum_cache(), enum_cache());
} else {
CHECK_LT(0, number_of_all_descriptors());
CHECK_LE(number_of_descriptors(), number_of_all_descriptors());
- // Check that properties with private symbols names are non-enumerable.
+ // Check that properties with private symbols names are non-enumerable, and
+ // that fields are in order.
+ int expected_field_index = 0;
for (int descriptor = 0; descriptor < number_of_descriptors();
descriptor++) {
- Object key = get(ToKeyIndex(descriptor))->cast<Object>();
+ Object key = *(GetDescriptorSlot(descriptor) + kEntryKeyIndex);
// number_of_descriptors() may be out of sync with the actual descriptors
// written during descriptor array construction.
if (key.IsUndefined(isolate)) continue;
@@ -799,14 +818,16 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
if (Name::cast(key).IsPrivate()) {
CHECK_NE(details.attributes() & DONT_ENUM, 0);
}
- MaybeObject value = get(ToValueIndex(descriptor));
+ MaybeObject value = GetValue(descriptor);
HeapObject heap_object;
if (details.location() == kField) {
+ CHECK_EQ(details.field_index(), expected_field_index);
CHECK(
value == MaybeObject::FromObject(FieldType::None()) ||
value == MaybeObject::FromObject(FieldType::Any()) ||
value->IsCleared() ||
(value->GetHeapObjectIfWeak(&heap_object) && heap_object.IsMap()));
+ expected_field_index += details.field_width_in_words();
} else {
CHECK(!value->IsWeakOrCleared());
CHECK(!value->cast<Object>().IsMap());
@@ -905,8 +926,6 @@ void JSAsyncGeneratorObject::JSAsyncGeneratorObjectVerify(Isolate* isolate) {
queue().HeapObjectVerify(isolate);
}
-USE_TORQUE_VERIFIER(JSValue)
-
void JSDate::JSDateVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSDateVerify(*this, isolate);
@@ -940,13 +959,7 @@ void JSDate::JSDateVerify(Isolate* isolate) {
}
}
-void JSMessageObject::JSMessageObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSMessageObjectVerify(*this, isolate);
- VerifySmiField(kMessageTypeOffset);
- VerifySmiField(kStartPositionOffset);
- VerifySmiField(kEndPositionOffset);
- VerifySmiField(kErrorLevelOffset);
-}
+USE_TORQUE_VERIFIER(JSMessageObject)
void String::StringVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::StringVerify(*this, isolate);
@@ -982,19 +995,16 @@ void SlicedString::SlicedStringVerify(Isolate* isolate) {
CHECK_GE(this->length(), SlicedString::kMinLength);
}
+USE_TORQUE_VERIFIER(ExternalString)
+
void JSBoundFunction::JSBoundFunctionVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSBoundFunctionVerify(*this, isolate);
CHECK(IsCallable());
-
- if (!raw_bound_target_function().IsUndefined(isolate)) {
- CHECK(bound_target_function().IsCallable());
- CHECK_EQ(IsConstructor(), bound_target_function().IsConstructor());
- }
+ CHECK_EQ(IsConstructor(), bound_target_function().IsConstructor());
}
void JSFunction::JSFunctionVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSFunctionVerify(*this, isolate);
- CHECK(raw_feedback_cell().IsFeedbackCell());
CHECK(code().IsCode());
CHECK(map().is_callable());
Handle<JSFunction> function(*this, isolate);
@@ -1168,13 +1178,12 @@ void JSArray::JSArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSArrayVerify(*this, isolate);
// If a GC was caused while constructing this array, the elements
// pointer may point to a one pointer filler map.
- if (!ElementsAreSafeToExamine()) return;
+ if (!ElementsAreSafeToExamine(isolate)) return;
if (elements().IsUndefined(isolate)) return;
CHECK(elements().IsFixedArray() || elements().IsFixedDoubleArray());
if (elements().length() == 0) {
CHECK_EQ(elements(), ReadOnlyRoots(isolate).empty_fixed_array());
}
- if (!length().IsNumber()) return;
// Verify that the length and the elements backing store are in sync.
if (length().IsSmi() && (HasFastElements() || HasFrozenOrSealedElements())) {
if (elements().length() > 0) {
@@ -1206,32 +1215,32 @@ void JSArray::JSArrayVerify(Isolate* isolate) {
}
}
+USE_TORQUE_VERIFIER(JSCollection)
+
void JSSet::JSSetVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSSetVerify(*this, isolate);
- VerifyHeapPointer(isolate, table());
CHECK(table().IsOrderedHashSet() || table().IsUndefined(isolate));
// TODO(arv): Verify OrderedHashTable too.
}
void JSMap::JSMapVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSMapVerify(*this, isolate);
- VerifyHeapPointer(isolate, table());
CHECK(table().IsOrderedHashMap() || table().IsUndefined(isolate));
// TODO(arv): Verify OrderedHashTable too.
}
+USE_TORQUE_VERIFIER(JSCollectionIterator)
+
void JSSetIterator::JSSetIteratorVerify(Isolate* isolate) {
CHECK(IsJSSetIterator());
- JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, table());
+ JSCollectionIteratorVerify(isolate);
CHECK(table().IsOrderedHashSet());
CHECK(index().IsSmi());
}
void JSMapIterator::JSMapIteratorVerify(Isolate* isolate) {
CHECK(IsJSMapIterator());
- JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, table());
+ JSCollectionIteratorVerify(isolate);
CHECK(table().IsOrderedHashMap());
CHECK(index().IsSmi());
}
@@ -1257,14 +1266,8 @@ void WeakCell::WeakCellVerify(Isolate* isolate) {
key_list_next().IsUndefined(isolate));
CHECK(key_list_prev().IsWeakCell() || key_list_prev().IsUndefined(isolate));
- if (key_list_prev().IsWeakCell()) {
- CHECK_EQ(WeakCell::cast(key_list_prev()).key_list_next(), *this);
- }
CHECK(key_list_next().IsWeakCell() || key_list_next().IsUndefined(isolate));
- if (key_list_next().IsWeakCell()) {
- CHECK_EQ(WeakCell::cast(key_list_next()).key_list_prev(), *this);
- }
CHECK(finalization_group().IsUndefined(isolate) ||
finalization_group().IsJSFinalizationGroup());
@@ -1288,6 +1291,7 @@ void JSFinalizationGroup::JSFinalizationGroupVerify(Isolate* isolate) {
if (cleared_cells().IsWeakCell()) {
CHECK(WeakCell::cast(cleared_cells()).prev().IsUndefined(isolate));
}
+ CHECK(next().IsUndefined(isolate) || next().IsJSFinalizationGroup());
}
void JSFinalizationGroupCleanupIterator::
@@ -1305,13 +1309,11 @@ void FinalizationGroupCleanupJobTask::FinalizationGroupCleanupJobTaskVerify(
void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSWeakMapVerify(*this, isolate);
- VerifyHeapPointer(isolate, table());
CHECK(table().IsEphemeronHashTable() || table().IsUndefined(isolate));
}
void JSArrayIterator::JSArrayIteratorVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSArrayIteratorVerify(*this, isolate);
- CHECK(iterated_object().IsJSReceiver());
CHECK_GE(next_index().Number(), 0);
CHECK_LE(next_index().Number(), kMaxSafeInteger);
@@ -1328,17 +1330,16 @@ void JSArrayIterator::JSArrayIteratorVerify(Isolate* isolate) {
void JSStringIterator::JSStringIteratorVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSStringIteratorVerify(*this, isolate);
- CHECK(string().IsString());
-
CHECK_GE(index(), 0);
CHECK_LE(index(), String::kMaxLength);
}
USE_TORQUE_VERIFIER(JSAsyncFromSyncIterator)
+USE_TORQUE_VERIFIER(JSWeakCollection)
+
void JSWeakSet::JSWeakSetVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSWeakSetVerify(*this, isolate);
- VerifyHeapPointer(isolate, table());
CHECK(table().IsEphemeronHashTable() || table().IsUndefined(isolate));
}
@@ -1351,11 +1352,7 @@ void CallableTask::CallableTaskVerify(Isolate* isolate) {
USE_TORQUE_VERIFIER(CallbackTask)
-void PromiseReactionJobTask::PromiseReactionJobTaskVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::PromiseReactionJobTaskVerify(*this, isolate);
- VerifyHeapPointer(isolate, handler());
- CHECK(handler().IsUndefined(isolate) || handler().IsCallable());
-}
+USE_TORQUE_VERIFIER(PromiseReactionJobTask)
USE_TORQUE_VERIFIER(PromiseFulfillReactionJobTask)
@@ -1369,7 +1366,6 @@ USE_TORQUE_VERIFIER(PromiseReaction)
void JSPromise::JSPromiseVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSPromiseVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
if (status() == Promise::kPending) {
CHECK(reactions().IsSmi() || reactions().IsPromiseReaction());
}
@@ -1460,7 +1456,7 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
break;
}
case JSRegExp::IRREGEXP: {
- bool is_native = RegExpImpl::UsesNativeRegExp();
+ bool is_native = RegExp::GeneratesNativeCode();
FixedArray arr = FixedArray::cast(data());
Object one_byte_data = arr.get(JSRegExp::kIrregexpLatin1CodeIndex);
@@ -1485,11 +1481,7 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
}
}
-void JSRegExpStringIterator::JSRegExpStringIteratorVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSRegExpStringIteratorVerify(*this, isolate);
- CHECK(iterating_string().IsString());
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSRegExpStringIterator)
void JSProxy::JSProxyVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSProxyVerify(*this, isolate);
@@ -1548,50 +1540,49 @@ void BigInt::BigIntVerify(Isolate* isolate) {
CHECK_IMPLIES(is_zero(), !sign()); // There is no -0n.
}
-void JSModuleNamespace::JSModuleNamespaceVerify(Isolate* isolate) {
- CHECK(IsJSModuleNamespace());
- VerifyPointer(isolate, module());
-}
+USE_TORQUE_VERIFIER(JSModuleNamespace)
-void ModuleInfoEntry::ModuleInfoEntryVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::ModuleInfoEntryVerify(*this, isolate);
+void SourceTextModuleInfoEntry::SourceTextModuleInfoEntryVerify(
+ Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SourceTextModuleInfoEntryVerify(*this,
+ isolate);
CHECK_IMPLIES(import_name().IsString(), module_request() >= 0);
CHECK_IMPLIES(export_name().IsString() && import_name().IsString(),
local_name().IsUndefined(isolate));
}
void Module::ModuleVerify(Isolate* isolate) {
- CHECK(IsModule());
-
- VerifyPointer(isolate, code());
- VerifyPointer(isolate, exports());
- VerifyPointer(isolate, module_namespace());
- VerifyPointer(isolate, requested_modules());
- VerifyPointer(isolate, script());
- VerifyPointer(isolate, import_meta());
- VerifyPointer(isolate, exception());
- VerifySmiField(kHashOffset);
- VerifySmiField(kStatusOffset);
-
- CHECK((status() >= kEvaluating && code().IsModuleInfo()) ||
- (status() == kInstantiated && code().IsJSGeneratorObject()) ||
- (status() == kInstantiating && code().IsJSFunction()) ||
- (code().IsSharedFunctionInfo()));
+ TorqueGeneratedClassVerifiers::ModuleVerify(*this, isolate);
- CHECK_EQ(status() == kErrored, !exception().IsTheHole(isolate));
+ CHECK_EQ(status() == Module::kErrored, !exception().IsTheHole(isolate));
CHECK(module_namespace().IsUndefined(isolate) ||
module_namespace().IsJSModuleNamespace());
if (module_namespace().IsJSModuleNamespace()) {
- CHECK_LE(kInstantiating, status());
+ CHECK_LE(Module::kInstantiating, status());
CHECK_EQ(JSModuleNamespace::cast(module_namespace()).module(), *this);
}
+ CHECK_NE(hash(), 0);
+}
+
+void SourceTextModule::SourceTextModuleVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SourceTextModuleVerify(*this, isolate);
+
+ CHECK((status() >= kEvaluating && code().IsSourceTextModuleInfo()) ||
+ (status() == kInstantiated && code().IsJSGeneratorObject()) ||
+ (status() == kInstantiating && code().IsJSFunction()) ||
+ (code().IsSharedFunctionInfo()));
+
CHECK_EQ(requested_modules().length(), info().module_requests().length());
+}
- CHECK(import_meta().IsTheHole(isolate) || import_meta().IsJSObject());
+void SyntheticModule::SyntheticModuleVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SyntheticModuleVerify(*this, isolate);
- CHECK_NE(hash(), 0);
+ for (int i = 0; i < export_names().length(); i++) {
+ CHECK(export_names().get(i).IsString());
+ }
}
void PrototypeInfo::PrototypeInfoVerify(Isolate* isolate) {
@@ -1646,8 +1637,6 @@ void EnumCache::EnumCacheVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(SourcePositionTableWithFrameCache)
-
USE_TORQUE_VERIFIER(ClassPositions)
void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionVerify(
@@ -1686,31 +1675,15 @@ void WasmExportedFunctionData::WasmExportedFunctionDataVerify(
wrapper_code().kind() == Code::C_WASM_ENTRY);
}
-void WasmModuleObject::WasmModuleObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::WasmModuleObjectVerify(*this, isolate);
- CHECK(managed_native_module().IsForeign());
- CHECK(export_wrappers().IsFixedArray());
- CHECK(script().IsScript());
-}
+USE_TORQUE_VERIFIER(WasmModuleObject)
-void WasmTableObject::WasmTableObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::WasmTableObjectVerify(*this, isolate);
- CHECK(elements().IsFixedArray());
- VerifySmiField(kRawTypeOffset);
-}
+USE_TORQUE_VERIFIER(WasmTableObject)
-void WasmMemoryObject::WasmMemoryObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::WasmMemoryObjectVerify(*this, isolate);
- CHECK(array_buffer().IsJSArrayBuffer());
- VerifySmiField(kMaximumPagesOffset);
-}
+USE_TORQUE_VERIFIER(WasmMemoryObject)
USE_TORQUE_VERIFIER(WasmGlobalObject)
-void WasmExceptionObject::WasmExceptionObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::WasmExceptionObjectVerify(*this, isolate);
- CHECK(serialized_signature().IsByteArray());
-}
+USE_TORQUE_VERIFIER(WasmExceptionObject)
void DataHandler::DataHandlerVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::DataHandlerVerify(*this, isolate);
@@ -1738,39 +1711,22 @@ void StoreHandler::StoreHandlerVerify(Isolate* isolate) {
// TODO(ishell): check handler integrity
}
-void AccessorInfo::AccessorInfoVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::AccessorInfoVerify(*this, isolate);
- VerifyForeignPointer(isolate, *this, getter());
- VerifyForeignPointer(isolate, *this, setter());
- VerifyForeignPointer(isolate, *this, js_getter());
-}
+USE_TORQUE_VERIFIER(AccessorInfo)
USE_TORQUE_VERIFIER(AccessorPair)
USE_TORQUE_VERIFIER(AccessCheckInfo)
void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
- CHECK(IsCallHandlerInfo());
+ TorqueGeneratedClassVerifiers::CallHandlerInfoVerify(*this, isolate);
CHECK(map() == ReadOnlyRoots(isolate).side_effect_call_handler_info_map() ||
map() ==
ReadOnlyRoots(isolate).side_effect_free_call_handler_info_map() ||
map() == ReadOnlyRoots(isolate)
.next_call_side_effect_free_call_handler_info_map());
- VerifyPointer(isolate, callback());
- VerifyPointer(isolate, js_callback());
- VerifyPointer(isolate, data());
}
-void InterceptorInfo::InterceptorInfoVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::InterceptorInfoVerify(*this, isolate);
- VerifyForeignPointer(isolate, *this, getter());
- VerifyForeignPointer(isolate, *this, setter());
- VerifyForeignPointer(isolate, *this, query());
- VerifyForeignPointer(isolate, *this, descriptor());
- VerifyForeignPointer(isolate, *this, deleter());
- VerifyForeignPointer(isolate, *this, enumerator());
- VerifyForeignPointer(isolate, *this, definer());
-}
+USE_TORQUE_VERIFIER(InterceptorInfo)
USE_TORQUE_VERIFIER(TemplateInfo)
@@ -1782,6 +1738,8 @@ USE_TORQUE_VERIFIER(WasmCapiFunctionData)
USE_TORQUE_VERIFIER(WasmJSFunctionData)
+USE_TORQUE_VERIFIER(WasmIndirectFunctionTable)
+
USE_TORQUE_VERIFIER(ObjectTemplateInfo)
void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
@@ -1792,11 +1750,7 @@ void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
CHECK(nested_site().IsAllocationSite() || nested_site() == Smi::kZero);
}
-void AllocationMemento::AllocationMementoVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::AllocationMementoVerify(*this, isolate);
- VerifyHeapPointer(isolate, allocation_site());
- CHECK(!IsValid() || GetAllocationSite().IsAllocationSite());
-}
+USE_TORQUE_VERIFIER(AllocationMemento)
void Script::ScriptVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::ScriptVerify(*this, isolate);
@@ -1859,62 +1813,26 @@ void UncompiledDataWithoutPreparseData::UncompiledDataWithoutPreparseDataVerify(
USE_TORQUE_VERIFIER(InterpreterData)
#ifdef V8_INTL_SUPPORT
-void JSV8BreakIterator::JSV8BreakIteratorVerify(Isolate* isolate) {
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kTypeOffset);
- VerifyObjectField(isolate, kBreakIteratorOffset);
- VerifyObjectField(isolate, kUnicodeStringOffset);
- VerifyObjectField(isolate, kBoundAdoptTextOffset);
- VerifyObjectField(isolate, kBoundFirstOffset);
- VerifyObjectField(isolate, kBoundNextOffset);
- VerifyObjectField(isolate, kBoundCurrentOffset);
- VerifyObjectField(isolate, kBoundBreakTypeOffset);
-}
-
-void JSCollator::JSCollatorVerify(Isolate* isolate) {
- CHECK(IsJSCollator());
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kICUCollatorOffset);
- VerifyObjectField(isolate, kBoundCompareOffset);
-}
-void JSDateTimeFormat::JSDateTimeFormatVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSDateTimeFormatVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSV8BreakIterator)
-void JSListFormat::JSListFormatVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSListFormatVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSCollator)
+
+USE_TORQUE_VERIFIER(JSDateTimeFormat)
+
+USE_TORQUE_VERIFIER(JSListFormat)
USE_TORQUE_VERIFIER(JSLocale)
-void JSNumberFormat::JSNumberFormatVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSNumberFormatVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSNumberFormat)
-void JSPluralRules::JSPluralRulesVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSPluralRulesVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSPluralRules)
-void JSRelativeTimeFormat::JSRelativeTimeFormatVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSRelativeTimeFormatVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSRelativeTimeFormat)
-void JSSegmentIterator::JSSegmentIteratorVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSSegmentIteratorVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSSegmentIterator)
-void JSSegmenter::JSSegmenterVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSSegmenterVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSSegmenter)
#endif // V8_INTL_SUPPORT
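
Most of the hand-written verifiers above collapse into USE_TORQUE_VERIFIER because the checks they performed (heap-pointer fields, Smi fields) are now emitted by Torque from the class definitions. The macro itself is defined earlier in objects-debug.cc; it roughly expands to a verifier that just delegates to the generated one, along these lines (approximate, not the verbatim macro):

// Approximate expansion of USE_TORQUE_VERIFIER(Class): the hand-written body
// is dropped and verification delegates to the Torque-generated checks.
#define USE_TORQUE_VERIFIER(Class)                                \
  void Class::Class##Verify(Isolate* isolate) {                   \
    TorqueGeneratedClassVerifiers::Class##Verify(*this, isolate); \
  }
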
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index e65c0af190..5284208285 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -50,6 +50,7 @@
#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/js-segmenter-inl.h"
#endif // V8_INTL_SUPPORT
+#include "src/compiler/node.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
@@ -60,11 +61,13 @@
#include "src/objects/struct-inl.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/transitions-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "torque-generated/class-definitions-tq-inl.h"
+#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
@@ -217,8 +220,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_ASYNC_FUNCTION_OBJECT_TYPE:
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
// TODO(titzer): debug printing for more wasm objects
@@ -240,6 +241,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case WASM_INSTANCE_TYPE:
WasmInstanceObject::cast(*this).WasmInstanceObjectPrint(os);
break;
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
JSGeneratorObject::cast(*this).JSGeneratorObjectPrint(os);
break;
@@ -270,8 +273,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_GLOBAL_OBJECT_TYPE:
JSGlobalObject::cast(*this).JSGlobalObjectPrint(os);
break;
- case JS_VALUE_TYPE:
- JSValue::cast(*this).JSValuePrint(os);
+ case JS_PRIMITIVE_WRAPPER_TYPE:
+ JSPrimitiveWrapper::cast(*this).JSPrimitiveWrapperPrint(os);
break;
case JS_DATE_TYPE:
JSDate::cast(*this).JSDatePrint(os);
@@ -414,6 +417,12 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case SCOPE_INFO_TYPE:
ScopeInfo::cast(*this).ScopeInfoPrint(os);
break;
+ case SOURCE_TEXT_MODULE_TYPE:
+ SourceTextModule::cast(*this).SourceTextModulePrint(os);
+ break;
+ case SYNTHETIC_MODULE_TYPE:
+ SyntheticModule::cast(*this).SyntheticModulePrint(os);
+ break;
case FEEDBACK_METADATA_TYPE:
FeedbackMetadata::cast(*this).FeedbackMetadataPrint(os);
break;
@@ -1191,8 +1200,8 @@ void FeedbackNexus::Print(std::ostream& os) { // NOLINT
}
}
-void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, *this, "JSValue");
+void JSPrimitiveWrapper::JSPrimitiveWrapperPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, *this, "JSPrimitiveWrapper");
os << "\n - value: " << Brief(value());
JSObjectPrintBody(os, *this);
}
@@ -1730,8 +1739,9 @@ void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(
os << "\n";
}
-void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "ModuleInfoEntry");
+void SourceTextModuleInfoEntry::SourceTextModuleInfoEntryPrint(
+ std::ostream& os) { // NOLINT
+ PrintHeader(os, "SourceTextModuleInfoEntry");
os << "\n - export_name: " << Brief(export_name());
os << "\n - local_name: " << Brief(local_name());
os << "\n - import_name: " << Brief(import_name());
@@ -1742,16 +1752,37 @@ void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+static void PrintModuleFields(Module module, std::ostream& os) {
+ os << "\n - exports: " << Brief(module.exports());
+ os << "\n - status: " << module.status();
+ os << "\n - exception: " << Brief(module.exception());
+}
+
void Module::ModulePrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "Module");
+ if (this->IsSourceTextModule()) {
+ SourceTextModule::cast(*this).SourceTextModulePrint(os);
+ } else if (this->IsSyntheticModule()) {
+ SyntheticModule::cast(*this).SyntheticModulePrint(os);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void SourceTextModule::SourceTextModulePrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "SourceTextModule");
+ PrintModuleFields(*this, os);
os << "\n - origin: " << Brief(script().GetNameOrSourceURL());
os << "\n - code: " << Brief(code());
- os << "\n - exports: " << Brief(exports());
os << "\n - requested_modules: " << Brief(requested_modules());
os << "\n - script: " << Brief(script());
os << "\n - import_meta: " << Brief(import_meta());
- os << "\n - status: " << status();
- os << "\n - exception: " << Brief(exception());
+ os << "\n";
+}
+
+void SyntheticModule::SyntheticModulePrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "SyntheticModule");
+ PrintModuleFields(*this, os);
+ os << "\n - export_names: " << Brief(export_names());
os << "\n";
}
@@ -2040,6 +2071,20 @@ void WasmCapiFunctionData::WasmCapiFunctionDataPrint(
os << "\n";
}
+void WasmIndirectFunctionTable::WasmIndirectFunctionTablePrint(
+ std::ostream& os) {
+ PrintHeader(os, "WasmIndirectFunctionTable");
+ os << "\n - size: " << size();
+ os << "\n - sig_ids: " << static_cast<void*>(sig_ids());
+ os << "\n - targets: " << static_cast<void*>(targets());
+ if (has_managed_native_allocations()) {
+ os << "\n - managed_native_allocations: "
+ << Brief(managed_native_allocations());
+ }
+ os << "\n - refs: " << Brief(refs());
+ os << "\n";
+}
+
void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "ObjectTemplateInfo");
os << "\n - tag: " << Brief(tag());
@@ -2167,7 +2212,7 @@ void JSPluralRules::JSPluralRulesPrint(std::ostream& os) { // NOLINT
os << "\n - locale: " << Brief(locale());
os << "\n - type: " << TypeAsString();
os << "\n - icu plural rules: " << Brief(icu_plural_rules());
- os << "\n - icu decimal format: " << Brief(icu_decimal_format());
+ os << "\n - icu_number_formatter: " << Brief(icu_number_formatter());
JSObjectPrintBody(os, *this);
}
@@ -2483,7 +2528,6 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (is_stable()) os << "\n - stable_map";
if (is_migration_target()) os << "\n - migration_target";
if (is_dictionary_map()) os << "\n - dictionary_map";
- if (has_hidden_prototype()) os << "\n - has_hidden_prototype";
if (has_named_interceptor()) os << "\n - named_interceptor";
if (has_indexed_interceptor()) os << "\n - indexed_interceptor";
if (may_have_interesting_symbols()) os << "\n - may_have_interesting_symbols";
@@ -2511,10 +2555,10 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
layout_descriptor().ShortPrint(os);
}
- Isolate* isolate;
// Read-only maps can't have transitions, which is fortunate because we need
// the isolate to iterate over the transitions.
- if (GetIsolateFromWritableObject(*this, &isolate)) {
+ if (!IsReadOnlyHeapObject(*this)) {
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
DisallowHeapAllocation no_gc;
TransitionsAccessor transitions(isolate, *this, &no_gc);
int nof_transitions = transitions.NumberOfTransitions();
@@ -2812,3 +2856,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_TransitionTree(void* object) {
#endif
}
}
+
+V8_EXPORT_PRIVATE extern void _v8_internal_Node_Print(void* object) {
+ reinterpret_cast<i::compiler::Node*>(object)->Print();
+}
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 096ffa2d48..8fb01dba9a 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -10,37 +10,6 @@
#include "src/codegen/x64/assembler-x64.h"
#include "src/utils/allocation.h"
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index ab8ba34d90..493c56996b 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -237,6 +237,30 @@ static const InstructionDesc cmov_instructions[16] = {
{"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
{"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}};
+namespace {
+int8_t Imm8(const uint8_t* data) {
+ return *reinterpret_cast<const int8_t*>(data);
+}
+uint8_t Imm8_U(const uint8_t* data) {
+ return *reinterpret_cast<const uint8_t*>(data);
+}
+int16_t Imm16(const uint8_t* data) {
+ return *reinterpret_cast<const int16_t*>(data);
+}
+uint16_t Imm16_U(const uint8_t* data) {
+ return *reinterpret_cast<const uint16_t*>(data);
+}
+int32_t Imm32(const uint8_t* data) {
+ return *reinterpret_cast<const int32_t*>(data);
+}
+uint32_t Imm32_U(const uint8_t* data) {
+ return *reinterpret_cast<const uint32_t*>(data);
+}
+int64_t Imm64(const uint8_t* data) {
+ return *reinterpret_cast<const int64_t*>(data);
+}
+} // namespace
+
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
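The Imm* helpers added above centralize the raw little-endian reads that were previously open-coded as reinterpret_casts at every call site. A minimal standalone sketch of the same pattern, assuming a plain byte buffer in place of a real instruction stream:

#include <cstdint>
#include <cstdio>

namespace {
int32_t Imm32(const uint8_t* data) {
  return *reinterpret_cast<const int32_t*>(data);
}
}  // namespace

int main() {
  // Four bytes of a rip-relative displacement, least significant byte first.
  alignas(4) const uint8_t disp[] = {0x78, 0x56, 0x34, 0x12};
  std::printf("[rip+0x%x]\n", Imm32(disp));  // prints [rip+0x12345678]
  return 0;
}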
@@ -458,8 +482,7 @@ int DisassemblerX64::PrintRightOperandHelper(
switch (mod) {
case 0:
if ((rm & 7) == 5) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
- AppendToBuffer("[rip+0x%x]", disp);
+ AppendToBuffer("[rip+0x%x]", Imm32(modrmp + 1));
return 5;
} else if ((rm & 7) == 4) {
// Codes for SIB byte.
@@ -473,7 +496,7 @@ int DisassemblerX64::PrintRightOperandHelper(
return 2;
} else if (base == 5) {
// base == rbp means no base register (when mod == 0).
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+ int32_t disp = Imm32(modrmp + 2);
AppendToBuffer("[%s*%d%s0x%x]", NameOfCPURegister(index), 1 << scale,
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
return 6;
@@ -497,8 +520,7 @@ int DisassemblerX64::PrintRightOperandHelper(
byte sib = *(modrmp + 1);
int scale, index, base;
get_sib(sib, &scale, &index, &base);
- int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<int8_t*>(modrmp + 2);
+ int disp = (mod == 2) ? Imm32(modrmp + 2) : Imm8(modrmp + 2);
if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
AppendToBuffer("[%s%s0x%x]", NameOfCPURegister(base),
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
@@ -510,8 +532,7 @@ int DisassemblerX64::PrintRightOperandHelper(
return mod == 2 ? 6 : 3;
} else {
// No sib.
- int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<int8_t*>(modrmp + 1);
+ int disp = (mod == 2) ? Imm32(modrmp + 1) : Imm8(modrmp + 1);
AppendToBuffer("[%s%s0x%x]", NameOfCPURegister(rm),
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
if (rm == i::kRootRegister.code()) {
@@ -540,15 +561,15 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
count = 1;
break;
case OPERAND_WORD_SIZE:
- value = *reinterpret_cast<int16_t*>(data);
+ value = Imm16(data);
count = 2;
break;
case OPERAND_DOUBLEWORD_SIZE:
- value = *reinterpret_cast<uint32_t*>(data);
+ value = Imm32_U(data);
count = 4;
break;
case OPERAND_QUADWORD_SIZE:
- value = *reinterpret_cast<int32_t*>(data);
+ value = Imm32(data);
count = 4;
break;
default:
@@ -763,7 +784,7 @@ int DisassemblerX64::JumpShort(byte* data) {
int DisassemblerX64::JumpConditional(byte* data) {
DCHECK_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
+ byte* dest = data + Imm32(data + 2) + 6;
const char* mnem = conditional_code_suffix[cond];
AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
return 6; // includes 0x0F
@@ -1663,6 +1684,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_2_INSTRUCTION_LIST(SSE34_DIS_CASE)
#undef SSE34_DIS_CASE
default:
UnimplementedInstruction();
@@ -1715,13 +1737,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 1;
} else if (third_byte == 0x16) {
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pextrd "); // reg/m32, xmm, imm8
+ // reg/m32 or reg/m64, xmm, imm8
+ AppendToBuffer("pextr%c ", rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
} else if (third_byte == 0x20) {
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pinsrd "); // xmm, reg/m32, imm8
+ AppendToBuffer("pinsrb "); // xmm, reg/m32, imm8
AppendToBuffer(" %s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%d", (*current) & 3);
@@ -1735,7 +1758,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 1;
} else if (third_byte == 0x22) {
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pinsrd "); // xmm, reg/m32, imm8
+ // xmm, reg/m32 or reg/m64, imm8
+ AppendToBuffer("pinsr%c ", rex_w() ? 'q' : 'd');
AppendToBuffer(" %s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%d", (*current) & 3);
@@ -1871,6 +1895,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "psrlw";
} else if (opcode == 0xD2) {
mnemonic = "psrld";
+ } else if (opcode == 0xD4) {
+ mnemonic = "paddq";
} else if (opcode == 0xD5) {
mnemonic = "pmullw";
} else if (opcode == 0xD7) {
@@ -1880,9 +1906,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0xD9) {
mnemonic = "psubusw";
} else if (opcode == 0xDA) {
- mnemonic = "pand";
- } else if (opcode == 0xDB) {
mnemonic = "pminub";
+ } else if (opcode == 0xDB) {
+ mnemonic = "pand";
} else if (opcode == 0xDC) {
mnemonic = "paddusb";
} else if (opcode == 0xDD) {
@@ -1921,6 +1947,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "psubw";
} else if (opcode == 0xFA) {
mnemonic = "psubd";
+ } else if (opcode == 0xFB) {
+ mnemonic = "psubq";
} else if (opcode == 0xFC) {
mnemonic = "paddb";
} else if (opcode == 0xFD) {
@@ -2262,7 +2290,10 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = true;
}
current += PrintOperands(mnemonic, OPER_REG_OP_ORDER, current);
- } else if (opcode == 0xAE && (*(data + 2) & 0xF8) == 0xE8) {
+ } else if (opcode == 0xAE && (data[2] & 0xF8) == 0xF0) {
+ AppendToBuffer("mfence");
+ current = data + 3;
+ } else if (opcode == 0xAE && (data[2] & 0xF8) == 0xE8) {
AppendToBuffer("lfence");
current = data + 3;
} else {
@@ -2415,18 +2446,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
byte* addr = nullptr;
switch (operand_size()) {
case OPERAND_WORD_SIZE:
- addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
+ addr = reinterpret_cast<byte*>(Imm16(data + 1));
data += 3;
break;
case OPERAND_DOUBLEWORD_SIZE:
- addr =
- reinterpret_cast<byte*>(*reinterpret_cast<uint32_t*>(data + 1));
+ addr = reinterpret_cast<byte*>(Imm32_U(data + 1));
data += 5;
break;
case OPERAND_QUADWORD_SIZE:
- addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
+ addr = reinterpret_cast<byte*>(Imm64(data + 1));
data += 9;
break;
default:
@@ -2439,7 +2467,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
+ byte* addr = data + Imm32(data + 1) + 5;
AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
@@ -2448,10 +2476,10 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case SHORT_IMMEDIATE_INSTR: {
int32_t imm;
if (operand_size() == OPERAND_WORD_SIZE) {
- imm = *reinterpret_cast<int16_t*>(data + 1);
+ imm = Imm16(data + 1);
data += 3;
} else {
- imm = *reinterpret_cast<int32_t*>(data + 1);
+ imm = Imm32(data + 1);
data += 5;
}
AppendToBuffer("%s rax,0x%x", idesc.mnem, imm);
@@ -2472,7 +2500,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (!processed) {
switch (*data) {
case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1));
+ AppendToBuffer("ret 0x%x", Imm16_U(data + 1));
data += 3;
break;
@@ -2556,12 +2584,10 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("mov%c ", operand_size_code());
data += PrintRightOperand(data);
if (operand_size() == OPERAND_WORD_SIZE) {
- int16_t imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm16(data));
data += 2;
} else {
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm32(data));
data += 4;
}
}
@@ -2657,12 +2683,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
}
case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
+ AppendToBuffer("push 0x%x", Imm32(data + 1));
data += 5;
break;
case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ AppendToBuffer("push 0x%x", Imm8(data + 1));
data += 2;
break;
@@ -2670,8 +2696,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xA3:
switch (operand_size()) {
case OPERAND_DOUBLEWORD_SIZE: {
- const char* memory_location = NameOfAddress(
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1)));
+ const char* memory_location =
+ NameOfAddress(reinterpret_cast<byte*>(Imm32(data + 1)));
if (*data == 0xA1) { // Opcode 0xA1
AppendToBuffer("movzxlq rax,(%s)", memory_location);
} else { // Opcode 0xA3
@@ -2683,7 +2709,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case OPERAND_QUADWORD_SIZE: {
// New x64 instruction mov rax,(imm_64).
const char* memory_location =
- NameOfAddress(*reinterpret_cast<byte**>(data + 1));
+ NameOfAddress(reinterpret_cast<byte*>(Imm64(data + 1)));
if (*data == 0xA1) { // Opcode 0xA1
AppendToBuffer("movq rax,(%s)", memory_location);
} else { // Opcode 0xA3
@@ -2699,7 +2725,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
+ AppendToBuffer("test al,0x%x", Imm8_U(data + 1));
data += 2;
break;
@@ -2707,15 +2733,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
int64_t value = 0;
switch (operand_size()) {
case OPERAND_WORD_SIZE:
- value = *reinterpret_cast<uint16_t*>(data + 1);
+ value = Imm16_U(data + 1);
data += 3;
break;
case OPERAND_DOUBLEWORD_SIZE:
- value = *reinterpret_cast<uint32_t*>(data + 1);
+ value = Imm32_U(data + 1);
data += 5;
break;
case OPERAND_QUADWORD_SIZE:
- value = *reinterpret_cast<int32_t*>(data + 1);
+ value = Imm32(data + 1);
data += 5;
break;
default:
@@ -2758,7 +2784,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0x3C:
- AppendToBuffer("cmp al,0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ AppendToBuffer("cmp al,0x%x", Imm8(data + 1));
data += 2;
break;
diff --git a/deps/v8/src/execution/OWNERS b/deps/v8/src/execution/OWNERS
index a62d530e1a..75c1a1b30e 100644
--- a/deps/v8/src/execution/OWNERS
+++ b/deps/v8/src/execution/OWNERS
@@ -1,10 +1,13 @@
-binji@chromium.org
bmeurer@chromium.org
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
mstarzinger@chromium.org
+mythria@chromium.org
+delphick@chromium.org
petermarshall@chromium.org
szuend@chromium.org
verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.cc b/deps/v8/src/execution/arm/frame-constants-arm.cc
index af04813301..602242ac97 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.cc
+++ b/deps/v8/src/execution/arm/frame-constants-arm.cc
@@ -6,9 +6,9 @@
#include "src/execution/arm/frame-constants-arm.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
+#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.cc b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
index 89a5259e2b..607081a562 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.cc
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
@@ -6,11 +6,11 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/execution/arm64/frame-constants-arm64.h"
+
#include "src/codegen/arm64/assembler-arm64-inl.h"
-#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/assembler.h"
-
-#include "src/execution/arm64/frame-constants-arm64.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index 285b4b2134..06c4e3a6cc 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -5,32 +5,15 @@
#include "src/execution/execution.h"
#include "src/api/api-inl.h"
-#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
-#include "src/debug/debug.h"
+#include "src/compiler/wasm-compiler.h" // Only for static asserts.
+#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
-#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
-#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
-void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
- DCHECK_NOT_NULL(isolate_);
- thread_local_.set_jslimit(kInterruptLimit);
- thread_local_.set_climit(kInterruptLimit);
- isolate_->heap()->SetStackLimits();
-}
-
-void StackGuard::reset_limits(const ExecutionAccess& lock) {
- DCHECK_NOT_NULL(isolate_);
- thread_local_.set_jslimit(thread_local_.real_jslimit_);
- thread_local_.set_climit(thread_local_.real_climit_);
- isolate_->heap()->SetStackLimits();
-}
-
namespace {
Handle<Object> NormalizeReceiver(Isolate* isolate, Handle<Object> receiver) {
@@ -235,6 +218,22 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
return isolate->factory()->undefined_value();
}
+ if (params.execution_target == Execution::Target::kCallable) {
+ Handle<Context> context = isolate->native_context();
+ if (!context->script_execution_callback().IsUndefined(isolate)) {
+ v8::Context::AbortScriptExecutionCallback callback =
+ v8::ToCData<v8::Context::AbortScriptExecutionCallback>(
+ context->script_execution_callback());
+ v8::Isolate* api_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
+ callback(api_isolate, api_context);
+ DCHECK(!isolate->has_scheduled_exception());
+ // Always throw an exception to abort execution if the callback exists.
+ isolate->ThrowIllegalOperation();
+ return MaybeHandle<Object>();
+ }
+ }
+
// Placeholder for return value.
Object value;
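The block added to Invoke above consults the context's script_execution_callback before any callable runs. A hedged sketch of the embedder side, assuming the public setter v8::Context::SetAbortScriptExecutionCallback pairs with the AbortScriptExecutionCallback type used here:

#include <v8.h>

// Once installed, Invoke() calls the callback and then throws an
// illegal-operation exception, aborting further script execution in
// this context.
void DisallowScriptExecution(v8::Local<v8::Context> context) {
  context->SetAbortScriptExecutionCallback(
      [](v8::Isolate*, v8::Local<v8::Context>) {
        // No work needed here; the throw happens inside Invoke() itself.
      });
}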
@@ -406,271 +405,68 @@ MaybeHandle<Object> Execution::TryRunMicrotasks(
exception_out));
}
-void StackGuard::SetStackLimit(uintptr_t limit) {
- ExecutionAccess access(isolate_);
- // If the current limits are special (e.g. due to a pending interrupt) then
- // leave them alone.
- uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
- if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
- thread_local_.set_jslimit(jslimit);
- }
- if (thread_local_.climit() == thread_local_.real_climit_) {
- thread_local_.set_climit(limit);
- }
- thread_local_.real_climit_ = limit;
- thread_local_.real_jslimit_ = jslimit;
-}
-
-void StackGuard::AdjustStackLimitForSimulator() {
- ExecutionAccess access(isolate_);
- uintptr_t climit = thread_local_.real_climit_;
- // If the current limits are special (e.g. due to a pending interrupt) then
- // leave them alone.
- uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
- if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
- thread_local_.set_jslimit(jslimit);
- isolate_->heap()->SetStackLimits();
- }
-}
-
-void StackGuard::EnableInterrupts() {
- ExecutionAccess access(isolate_);
- if (has_pending_interrupts(access)) {
- set_interrupt_limits(access);
- }
-}
-
-void StackGuard::DisableInterrupts() {
- ExecutionAccess access(isolate_);
- reset_limits(access);
-}
-
-void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
- ExecutionAccess access(isolate_);
- DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
- if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
- // Intercept already requested interrupts.
- int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
- scope->intercepted_flags_ = intercepted;
- thread_local_.interrupt_flags_ &= ~intercepted;
- } else {
- DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
- // Restore postponed interrupts.
- int restored_flags = 0;
- for (InterruptsScope* current = thread_local_.interrupt_scopes_;
- current != nullptr; current = current->prev_) {
- restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
- current->intercepted_flags_ &= ~scope->intercept_mask_;
- }
- thread_local_.interrupt_flags_ |= restored_flags;
+struct StackHandlerMarker {
+ Address next;
+ Address padding;
+};
+STATIC_ASSERT(offsetof(StackHandlerMarker, next) ==
+ StackHandlerConstants::kNextOffset);
+STATIC_ASSERT(offsetof(StackHandlerMarker, padding) ==
+ StackHandlerConstants::kPaddingOffset);
+STATIC_ASSERT(sizeof(StackHandlerMarker) == StackHandlerConstants::kSize);
+
+void Execution::CallWasm(Isolate* isolate, Handle<Code> wrapper_code,
+ Address wasm_call_target, Handle<Object> object_ref,
+ Address packed_args) {
+ using WasmEntryStub = GeneratedCode<Address(
+ Address target, Address object_ref, Address argv, Address c_entry_fp)>;
+ WasmEntryStub stub_entry =
+ WasmEntryStub::FromAddress(isolate, wrapper_code->InstructionStart());
+
+ // Save and restore context around invocation and block the
+ // allocation of handles without explicit handle scopes.
+ SaveContext save(isolate);
+ SealHandleScope shs(isolate);
+
+ Address saved_c_entry_fp = *isolate->c_entry_fp_address();
+ Address saved_js_entry_sp = *isolate->js_entry_sp_address();
+ if (saved_js_entry_sp == kNullAddress) {
+ *isolate->js_entry_sp_address() = GetCurrentStackPosition();
}
- if (!has_pending_interrupts(access)) reset_limits(access);
- // Add scope to the chain.
- scope->prev_ = thread_local_.interrupt_scopes_;
- thread_local_.interrupt_scopes_ = scope;
-}
+ StackHandlerMarker stack_handler;
+ stack_handler.next = isolate->thread_local_top()->handler_;
+#ifdef V8_USE_ADDRESS_SANITIZER
+ stack_handler.padding = GetCurrentStackPosition();
+#else
+ stack_handler.padding = 0;
+#endif
+ isolate->thread_local_top()->handler_ =
+ reinterpret_cast<Address>(&stack_handler);
+ trap_handler::SetThreadInWasm();
-void StackGuard::PopInterruptsScope() {
- ExecutionAccess access(isolate_);
- InterruptsScope* top = thread_local_.interrupt_scopes_;
- DCHECK_NE(top->mode_, InterruptsScope::kNoop);
- if (top->mode_ == InterruptsScope::kPostponeInterrupts) {
- // Make intercepted interrupts active.
- DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
- thread_local_.interrupt_flags_ |= top->intercepted_flags_;
- } else {
- DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
- // Postpone existing interupts if needed.
- if (top->prev_) {
- for (int interrupt = 1; interrupt < ALL_INTERRUPTS;
- interrupt = interrupt << 1) {
- InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
- if ((thread_local_.interrupt_flags_ & flag) &&
- top->prev_->Intercept(flag)) {
- thread_local_.interrupt_flags_ &= ~flag;
- }
- }
+ {
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kCodeEntry == 0);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kObjectRef == 1);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kArgumentsBuffer == 2);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kCEntryFp == 3);
+ Address result = stub_entry.Call(wasm_call_target, object_ref->ptr(),
+ packed_args, saved_c_entry_fp);
+ if (result != kNullAddress) {
+ isolate->set_pending_exception(Object(result));
}
}
- if (has_pending_interrupts(access)) set_interrupt_limits(access);
- // Remove scope from chain.
- thread_local_.interrupt_scopes_ = top->prev_;
-}
-
-bool StackGuard::CheckInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & flag;
-}
-void StackGuard::RequestInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- // Check the chain of InterruptsScope for interception.
- if (thread_local_.interrupt_scopes_ &&
- thread_local_.interrupt_scopes_->Intercept(flag)) {
- return;
+ // If there was an exception, then the thread-in-wasm flag is cleared
+ // already.
+ if (trap_handler::IsThreadInWasm()) {
+ trap_handler::ClearThreadInWasm();
}
-
- // Not intercepted. Set as active interrupt flag.
- thread_local_.interrupt_flags_ |= flag;
- set_interrupt_limits(access);
-
- // If this isolate is waiting in a futex, notify it to wake up.
- isolate_->futex_wait_list_node()->NotifyWake();
-}
-
-void StackGuard::ClearInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- // Clear the interrupt flag from the chain of InterruptsScope.
- for (InterruptsScope* current = thread_local_.interrupt_scopes_;
- current != nullptr; current = current->prev_) {
- current->intercepted_flags_ &= ~flag;
+ isolate->thread_local_top()->handler_ = stack_handler.next;
+ if (saved_js_entry_sp == kNullAddress) {
+ *isolate->js_entry_sp_address() = saved_js_entry_sp;
}
-
- // Clear the interrupt flag from the active interrupt flags.
- thread_local_.interrupt_flags_ &= ~flag;
- if (!has_pending_interrupts(access)) reset_limits(access);
-}
-
-bool StackGuard::CheckAndClearInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- bool result = (thread_local_.interrupt_flags_ & flag);
- thread_local_.interrupt_flags_ &= ~flag;
- if (!has_pending_interrupts(access)) reset_limits(access);
- return result;
-}
-
-char* StackGuard::ArchiveStackGuard(char* to) {
- ExecutionAccess access(isolate_);
- MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
- ThreadLocal blank;
-
- // Set the stack limits using the old thread_local_.
- // TODO(isolates): This was the old semantics of constructing a ThreadLocal
- // (as the ctor called SetStackLimits, which looked at the
- // current thread_local_ from StackGuard)-- but is this
- // really what was intended?
- isolate_->heap()->SetStackLimits();
- thread_local_ = blank;
-
- return to + sizeof(ThreadLocal);
-}
-
-char* StackGuard::RestoreStackGuard(char* from) {
- ExecutionAccess access(isolate_);
- MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- isolate_->heap()->SetStackLimits();
- return from + sizeof(ThreadLocal);
-}
-
-void StackGuard::FreeThreadResources() {
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindOrAllocatePerThreadDataForThisThread();
- per_thread->set_stack_limit(thread_local_.real_climit_);
-}
-
-void StackGuard::ThreadLocal::Clear() {
- real_jslimit_ = kIllegalLimit;
- set_jslimit(kIllegalLimit);
- real_climit_ = kIllegalLimit;
- set_climit(kIllegalLimit);
- interrupt_scopes_ = nullptr;
- interrupt_flags_ = 0;
-}
-
-bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
- bool should_set_stack_limits = false;
- if (real_climit_ == kIllegalLimit) {
- const uintptr_t kLimitSize = FLAG_stack_size * KB;
- DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
- uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
- real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
- set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
- real_climit_ = limit;
- set_climit(limit);
- should_set_stack_limits = true;
- }
- interrupt_scopes_ = nullptr;
- interrupt_flags_ = 0;
- return should_set_stack_limits;
-}
-
-void StackGuard::ClearThread(const ExecutionAccess& lock) {
- thread_local_.Clear();
- isolate_->heap()->SetStackLimits();
-}
-
-void StackGuard::InitThread(const ExecutionAccess& lock) {
- if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindOrAllocatePerThreadDataForThisThread();
- uintptr_t stored_limit = per_thread->stack_limit();
- // You should hold the ExecutionAccess lock when you call this.
- if (stored_limit != 0) {
- SetStackLimit(stored_limit);
- }
-}
-
-// --- C a l l s t o n a t i v e s ---
-
-Object StackGuard::HandleInterrupts() {
- TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");
-
- if (FLAG_verify_predictable) {
- // Advance synthetic time by making a time request.
- isolate_->heap()->MonotonicallyIncreasingTimeInMs();
- }
-
- if (CheckAndClearInterrupt(GC_REQUEST)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCHandleGCRequest");
- isolate_->heap()->HandleGCRequest();
- }
-
- if (CheckAndClearInterrupt(GROW_SHARED_MEMORY)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "V8.WasmGrowSharedMemory");
- isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
- isolate_);
- }
-
- if (CheckAndClearInterrupt(TERMINATE_EXECUTION)) {
- TRACE_EVENT0("v8.execute", "V8.TerminateExecution");
- return isolate_->TerminateExecution();
- }
-
- if (CheckAndClearInterrupt(DEOPT_MARKED_ALLOCATION_SITES)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "V8.GCDeoptMarkedAllocationSites");
- isolate_->heap()->DeoptMarkedAllocationSites();
- }
-
- if (CheckAndClearInterrupt(INSTALL_CODE)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.InstallOptimizedFunctions");
- DCHECK(isolate_->concurrent_recompilation_enabled());
- isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- }
-
- if (CheckAndClearInterrupt(API_INTERRUPT)) {
- TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
- // Callbacks must be invoked outside of ExecutionAccess lock.
- isolate_->InvokeApiInterruptCallbacks();
- }
-
- if (CheckAndClearInterrupt(LOG_WASM_CODE)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "LogCode");
- isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
- }
-
- if (CheckAndClearInterrupt(WASM_CODE_GC)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "WasmCodeGC");
- isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
- }
-
- isolate_->counters()->stack_interrupts()->Increment();
- isolate_->counters()->runtime_profiler_ticks()->Increment();
- isolate_->runtime_profiler()->MarkCandidatesForOptimization();
-
- return ReadOnlyRoots(isolate_).undefined_value();
+ *isolate->c_entry_fp_address() = saved_c_entry_fp;
}
} // namespace internal
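CallWasm links a C++-allocated StackHandlerMarker into the handler chain, so its layout has to match what generated code expects; the STATIC_ASSERTs above pin that down. A standalone sketch of the same compile-time layout check, with illustrative constants standing in for StackHandlerConstants:

#include <cstddef>
#include <cstdint>

struct StackHandlerMarker {
  uintptr_t next;
  uintptr_t padding;
};

// Illustrative stand-ins for StackHandlerConstants.
constexpr size_t kNextOffset = 0;
constexpr size_t kPaddingOffset = sizeof(uintptr_t);
constexpr size_t kSize = 2 * sizeof(uintptr_t);

static_assert(offsetof(StackHandlerMarker, next) == kNextOffset,
              "handler chain link must come first");
static_assert(offsetof(StackHandlerMarker, padding) == kPaddingOffset,
              "padding slot follows the link");
static_assert(sizeof(StackHandlerMarker) == kSize,
              "no extra fields or padding");

int main() { return 0; }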
diff --git a/deps/v8/src/execution/execution.h b/deps/v8/src/execution/execution.h
index 48a8d64424..3b8ecf038d 100644
--- a/deps/v8/src/execution/execution.h
+++ b/deps/v8/src/execution/execution.h
@@ -5,7 +5,6 @@
#ifndef V8_EXECUTION_EXECUTION_H_
#define V8_EXECUTION_EXECUTION_H_
-#include "src/base/atomicops.h"
#include "src/common/globals.h"
namespace v8 {
@@ -60,174 +59,16 @@ class Execution final : public AllStatic {
static MaybeHandle<Object> TryRunMicrotasks(
Isolate* isolate, MicrotaskQueue* microtask_queue,
MaybeHandle<Object>* exception_out);
-};
-
-class ExecutionAccess;
-class InterruptsScope;
-
-// StackGuard contains the handling of the limits that are used to limit the
-// number of nested invocations of JavaScript and the stack size used in each
-// invocation.
-class V8_EXPORT_PRIVATE StackGuard final {
- public:
- explicit StackGuard(Isolate* isolate) : isolate_(isolate) {}
-
- // Pass the address beyond which the stack should not grow. The stack
- // is assumed to grow downwards.
- void SetStackLimit(uintptr_t limit);
-
- // The simulator uses a separate JS stack. Limits on the JS stack might have
- // to be adjusted in order to reflect overflows of the C stack, because we
- // cannot rely on the interleaving of frames on the simulator.
- void AdjustStackLimitForSimulator();
-
- // Threading support.
- char* ArchiveStackGuard(char* to);
- char* RestoreStackGuard(char* from);
- static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
- void FreeThreadResources();
- // Sets up the default stack guard for this thread if it has not
- // already been set up.
- void InitThread(const ExecutionAccess& lock);
- // Clears the stack guard for this thread so it does not look as if
- // it has been set up.
- void ClearThread(const ExecutionAccess& lock);
-
-#define INTERRUPT_LIST(V) \
- V(TERMINATE_EXECUTION, TerminateExecution, 0) \
- V(GC_REQUEST, GC, 1) \
- V(INSTALL_CODE, InstallCode, 2) \
- V(API_INTERRUPT, ApiInterrupt, 3) \
- V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) \
- V(GROW_SHARED_MEMORY, GrowSharedMemory, 5) \
- V(LOG_WASM_CODE, LogWasmCode, 6) \
- V(WASM_CODE_GC, WasmCodeGC, 7)
-
-#define V(NAME, Name, id) \
- inline bool Check##Name() { return CheckInterrupt(NAME); } \
- inline bool CheckAndClear##Name() { return CheckAndClearInterrupt(NAME); } \
- inline void Request##Name() { RequestInterrupt(NAME); } \
- inline void Clear##Name() { ClearInterrupt(NAME); }
- INTERRUPT_LIST(V)
-#undef V
-
- // Flag used to set the interrupt causes.
- enum InterruptFlag {
-#define V(NAME, Name, id) NAME = (1 << id),
- INTERRUPT_LIST(V)
-#undef V
-#define V(NAME, Name, id) NAME |
- ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
-#undef V
- };
-
- uintptr_t climit() { return thread_local_.climit(); }
- uintptr_t jslimit() { return thread_local_.jslimit(); }
- // This provides an asynchronous read of the stack limits for the current
- // thread. There are no locks protecting this, but it is assumed that you
- // have the global V8 lock if you are using multiple V8 threads.
- uintptr_t real_climit() { return thread_local_.real_climit_; }
- uintptr_t real_jslimit() { return thread_local_.real_jslimit_; }
- Address address_of_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.jslimit_);
- }
- Address address_of_real_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
- }
-
- // If the stack guard is triggered, but it is not an actual
- // stack overflow, then handle the interruption accordingly.
- Object HandleInterrupts();
-
- private:
- bool CheckInterrupt(InterruptFlag flag);
- void RequestInterrupt(InterruptFlag flag);
- void ClearInterrupt(InterruptFlag flag);
- bool CheckAndClearInterrupt(InterruptFlag flag);
-
- // You should hold the ExecutionAccess lock when calling this method.
- bool has_pending_interrupts(const ExecutionAccess& lock) {
- return thread_local_.interrupt_flags_ != 0;
- }
-
- // You should hold the ExecutionAccess lock when calling this method.
- inline void set_interrupt_limits(const ExecutionAccess& lock);
-
- // Reset limits to actual values. For example after handling interrupt.
- // You should hold the ExecutionAccess lock when calling this method.
- inline void reset_limits(const ExecutionAccess& lock);
-
- // Enable or disable interrupts.
- void EnableInterrupts();
- void DisableInterrupts();
-
-#if V8_TARGET_ARCH_64_BIT
- static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe};
- static const uintptr_t kIllegalLimit = uintptr_t{0xfffffffffffffff8};
-#else
- static const uintptr_t kInterruptLimit = 0xfffffffe;
- static const uintptr_t kIllegalLimit = 0xfffffff8;
-#endif
-
- void PushInterruptsScope(InterruptsScope* scope);
- void PopInterruptsScope();
-
- class ThreadLocal final {
- public:
- ThreadLocal() { Clear(); }
- // You should hold the ExecutionAccess lock when you call Initialize or
- // Clear.
- void Clear();
-
- // Returns true if the heap's stack limits should be set, false if not.
- bool Initialize(Isolate* isolate);
-
- // The stack limit is split into a JavaScript and a C++ stack limit. These
- // two are the same except when running on a simulator where the C++ and
- // JavaScript stacks are separate. Each of the two stack limits have two
- // values. The one eith the real_ prefix is the actual stack limit
- // set for the VM. The one without the real_ prefix has the same value as
- // the actual stack limit except when there is an interruption (e.g. debug
- // break or preemption) in which case it is lowered to make stack checks
- // fail. Both the generated code and the runtime system check against the
- // one without the real_ prefix.
- uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
- uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
-
- // jslimit_ and climit_ can be read without any lock.
- // Writing requires the ExecutionAccess lock.
- base::AtomicWord jslimit_;
- base::AtomicWord climit_;
-
- uintptr_t jslimit() {
- return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
- }
- void set_jslimit(uintptr_t limit) {
- return base::Relaxed_Store(&jslimit_,
- static_cast<base::AtomicWord>(limit));
- }
- uintptr_t climit() {
- return bit_cast<uintptr_t>(base::Relaxed_Load(&climit_));
- }
- void set_climit(uintptr_t limit) {
- return base::Relaxed_Store(&climit_,
- static_cast<base::AtomicWord>(limit));
- }
-
- InterruptsScope* interrupt_scopes_;
- int interrupt_flags_;
- };
-
- // TODO(isolates): Technically this could be calculated directly from a
- // pointer to StackGuard.
- Isolate* isolate_;
- ThreadLocal thread_local_;
-
- friend class Isolate;
- friend class StackLimitCheck;
- friend class InterruptsScope;
- DISALLOW_COPY_AND_ASSIGN(StackGuard);
+ // Call a Wasm function identified by {wasm_call_target} through the
+ // provided {wrapper_code}, which must match the function's signature.
+ // Upon return, either isolate->has_pending_exception() is true, or
+ // the function's return values are in {packed_args}.
+ V8_EXPORT_PRIVATE static void CallWasm(Isolate* isolate,
+ Handle<Code> wrapper_code,
+ Address wasm_call_target,
+ Handle<Object> object_ref,
+ Address packed_args);
};
} // namespace internal
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 7ddee5689e..a6e5c9522c 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -249,6 +249,13 @@ class ConstructFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(5);
};
+class CWasmEntryFrameConstants : public TypedFrameConstants {
+ public:
+ // FP-relative:
+ static constexpr int kCEntryFPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ DEFINE_TYPED_FRAME_SIZES(1);
+};
+
class WasmCompiledFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
@@ -271,7 +278,7 @@ class BuiltinContinuationFrameConstants : public TypedFrameConstants {
TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
static constexpr int kBuiltinContextOffset =
TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
- static constexpr int kBuiltinOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
+ static constexpr int kBuiltinIndexOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
// The argument count is in the first allocatable register, stored below the
// fixed part of the frame and therefore is not part of the fixed frame size.
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index aeb43fe0a6..52f38857cc 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -5,7 +5,7 @@
#ifndef V8_EXECUTION_FRAMES_INL_H_
#define V8_EXECUTION_FRAMES_INL_H_
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
@@ -48,11 +48,11 @@ inline Address StackHandler::address() const {
inline StackHandler* StackHandler::next() const {
const int offset = StackHandlerConstants::kNextOffset;
- return FromAddress(Memory<Address>(address() + offset));
+ return FromAddress(base::Memory<Address>(address() + offset));
}
inline Address StackHandler::next_address() const {
- return Memory<Address>(address() + StackHandlerConstants::kNextOffset);
+ return base::Memory<Address>(address() + StackHandlerConstants::kNextOffset);
}
inline StackHandler* StackHandler::FromAddress(Address address) {
@@ -112,21 +112,22 @@ inline Object BuiltinExitFrame::receiver_slot_object() const {
const int receiverOffset = BuiltinExitFrameConstants::kNewTargetOffset +
(argc - 1) * kSystemPointerSize;
- return Object(Memory<Address>(fp() + receiverOffset));
+ return Object(base::Memory<Address>(fp() + receiverOffset));
}
inline Object BuiltinExitFrame::argc_slot_object() const {
- return Object(Memory<Address>(fp() + BuiltinExitFrameConstants::kArgcOffset));
+ return Object(
+ base::Memory<Address>(fp() + BuiltinExitFrameConstants::kArgcOffset));
}
inline Object BuiltinExitFrame::target_slot_object() const {
return Object(
- Memory<Address>(fp() + BuiltinExitFrameConstants::kTargetOffset));
+ base::Memory<Address>(fp() + BuiltinExitFrameConstants::kTargetOffset));
}
inline Object BuiltinExitFrame::new_target_slot_object() const {
- return Object(
- Memory<Address>(fp() + BuiltinExitFrameConstants::kNewTargetOffset));
+ return Object(base::Memory<Address>(
+ fp() + BuiltinExitFrameConstants::kNewTargetOffset));
}
inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
@@ -134,20 +135,20 @@ inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
}
inline Object StandardFrame::GetExpression(int index) const {
- return Object(Memory<Address>(GetExpressionAddress(index)));
+ return Object(base::Memory<Address>(GetExpressionAddress(index)));
}
inline void StandardFrame::SetExpression(int index, Object value) {
- Memory<Address>(GetExpressionAddress(index)) = value.ptr();
+ base::Memory<Address>(GetExpressionAddress(index)) = value.ptr();
}
inline Address StandardFrame::caller_fp() const {
- return Memory<Address>(fp() + StandardFrameConstants::kCallerFPOffset);
+ return base::Memory<Address>(fp() + StandardFrameConstants::kCallerFPOffset);
}
inline Address StandardFrame::caller_pc() const {
- return Memory<Address>(ComputePCAddress(fp()));
+ return base::Memory<Address>(ComputePCAddress(fp()));
}
@@ -163,14 +164,14 @@ inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
intptr_t frame_type =
- Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
+ base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
}
inline bool StandardFrame::IsConstructFrame(Address fp) {
intptr_t frame_type =
- Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
+ base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
}
@@ -187,7 +188,7 @@ Address JavaScriptFrame::GetParameterSlot(int index) const {
}
inline void JavaScriptFrame::set_receiver(Object value) {
- Memory<Address>(GetParameterSlot(-1)) = value.ptr();
+ base::Memory<Address>(GetParameterSlot(-1)) = value.ptr();
}
inline bool JavaScriptFrame::has_adapted_arguments() const {
@@ -196,7 +197,7 @@ inline bool JavaScriptFrame::has_adapted_arguments() const {
inline Object JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Object(Memory<Address>(fp() + offset));
+ return Object(base::Memory<Address>(fp() + offset));
}
inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index af660a338e..126cb9530e 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -33,6 +33,23 @@ namespace internal {
ReturnAddressLocationResolver StackFrame::return_address_location_resolver_ =
nullptr;
+namespace {
+
+Address AddressOf(const StackHandler* handler) {
+ Address raw = handler->address();
+#ifdef V8_USE_ADDRESS_SANITIZER
+ // ASan puts C++-allocated StackHandler markers onto its fake stack.
+ // We work around that by storing the real stack address in the "padding"
+ // field. StackHandlers allocated from generated code have 0 as padding.
+ Address padding =
+ base::Memory<Address>(raw + StackHandlerConstants::kPaddingOffset);
+ if (padding != 0) return padding;
+#endif
+ return raw;
+}
+
+} // namespace
+
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
class StackHandlerIterator {
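The new AddressOf helper prefers the padding slot over the handler's own address, because under AddressSanitizer a C++-allocated handler can live on ASan's fake stack. A standalone model of that lookup, using a hypothetical Handler struct in place of StackHandler:

#include <cassert>
#include <cstdint>

struct Handler {
  uintptr_t next;
  uintptr_t padding;  // real stack address, or 0 for generated-code handlers
};

uintptr_t AddressOf(const Handler* handler) {
  return handler->padding != 0 ? handler->padding
                               : reinterpret_cast<uintptr_t>(handler);
}

int main() {
  Handler from_generated_code{0, 0};
  Handler from_cpp{0, 0x1234};  // pretend real stack slot
  assert(AddressOf(&from_generated_code) ==
         reinterpret_cast<uintptr_t>(&from_generated_code));
  assert(AddressOf(&from_cpp) == 0x1234u);
  return 0;
}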
@@ -40,12 +57,18 @@ class StackHandlerIterator {
StackHandlerIterator(const StackFrame* frame, StackHandler* handler)
: limit_(frame->fp()), handler_(handler) {
// Make sure the handler has already been unwound to this frame.
- DCHECK(frame->sp() <= handler->address());
+ DCHECK(frame->sp() <= AddressOf(handler));
+ // For CWasmEntry frames, the handler was registered by the last C++
+ // frame (Execution::CallWasm), so even though its address is already
+ // beyond the limit, we know we always want to unwind one handler.
+ if (frame->type() == StackFrame::C_WASM_ENTRY) {
+ handler_ = handler_->next();
+ }
}
StackHandler* handler() const { return handler_; }
- bool done() { return handler_ == nullptr || handler_->address() > limit_; }
+ bool done() { return handler_ == nullptr || AddressOf(handler_) > limit_; }
void Advance() {
DCHECK(!done());
handler_ = handler_->next();
@@ -146,7 +169,7 @@ StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
}
StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate,
- StackFrame::Id id)
+ StackFrameId id)
: StackTraceFrameIterator(isolate) {
while (!done() && frame()->id() != id) Advance();
}
@@ -255,6 +278,11 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
bool advance_frame = true;
Address fast_c_fp = isolate->isolate_data()->fast_c_call_caller_fp();
+ uint8_t stack_is_iterable = isolate->isolate_data()->stack_is_iterable();
+ if (!stack_is_iterable) {
+ frame_ = nullptr;
+ return;
+ }
// 'Fast C calls' are a special type of C call where we call directly from JS
to C without an exit frame in between. The CEntryStub is responsible for
// setting Isolate::c_entry_fp, meaning that it won't be set for fast C calls.
@@ -637,6 +665,12 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
return ExitFrame::GetStateForFramePointer(fp, state);
}
+StackFrame::Type CWasmEntryFrame::GetCallerState(State* state) const {
+ const int offset = CWasmEntryFrameConstants::kCEntryFPOffset;
+ Address fp = Memory<Address>(this->fp() + offset);
+ return ExitFrame::GetStateForFramePointer(fp, state);
+}
+
Code ConstructEntryFrame::unchecked_code() const {
return isolate()->heap()->builtin(Builtins::kJSConstructEntry);
}
@@ -972,7 +1006,6 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
parameters_limit);
}
- DEFINE_ROOT_VALUE(isolate());
// Visit pointer spill slots and locals.
uint8_t* safepoint_bits = safepoint_entry.bits();
for (unsigned index = 0; index < stack_slots; index++) {
@@ -992,7 +1025,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
if (!HAS_SMI_TAG(compressed_value)) {
// We don't need to update smi values.
*spill_slot.location() =
- DecompressTaggedPointer(ROOT_VALUE, compressed_value);
+ DecompressTaggedPointer(isolate(), compressed_value);
}
#endif
v->VisitRootPointer(Root::kTop, nullptr, spill_slot);
@@ -1910,7 +1943,8 @@ int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
wasm::WasmCode* code =
isolate()->wasm_engine()->code_manager()->LookupCode(pc());
if (!code->IsAnonymous() && code->handler_table_size() > 0) {
- HandlerTable table(code->handler_table(), code->handler_table_size());
+ HandlerTable table(code->handler_table(), code->handler_table_size(),
+ HandlerTable::kReturnAddressBasedEncoding);
int pc_offset = static_cast<int>(pc() - code->instruction_start());
*stack_slots = static_cast<int>(code->stack_slots());
return table.LookupReturn(pc_offset);
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 982716db93..1f83984f97 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -6,6 +6,7 @@
#define V8_EXECUTION_FRAMES_H_
#include "src/codegen/safepoint-table.h"
+#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/objects/code.h"
#include "src/objects/objects.h"
@@ -98,12 +99,6 @@ class StackFrame {
};
#undef DECLARE_TYPE
- // Opaque data type for identifying stack frames. Used extensively
- // by the debugger.
- // ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type
- // has correct value range (see Issue 830 for more details).
- enum Id { ID_MIN_VALUE = kMinInt, ID_MAX_VALUE = kMaxInt, NO_ID = 0 };
-
// Used to mark the outermost JS entry frame.
//
// The mark is an opaque value that should be pushed onto the stack directly,
@@ -112,7 +107,9 @@ class StackFrame {
INNER_JSENTRY_FRAME = (0 << kSmiTagSize) | kSmiTag,
OUTERMOST_JSENTRY_FRAME = (1 << kSmiTagSize) | kSmiTag
};
+ // NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((INNER_JSENTRY_FRAME & kHeapObjectTagMask) != kHeapObjectTag);
+ // NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((OUTERMOST_JSENTRY_FRAME & kHeapObjectTagMask) !=
kHeapObjectTag);
@@ -145,7 +142,13 @@ class StackFrame {
// the type of the value on the stack.
static Type MarkerToType(intptr_t marker) {
DCHECK(IsTypeMarker(marker));
- return static_cast<Type>(marker >> kSmiTagSize);
+ intptr_t type = marker >> kSmiTagSize;
+ // TODO(petermarshall): There is a bug in the arm simulators that causes
+ // invalid frame markers.
+#if !(defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM))
+ DCHECK_LT(static_cast<uintptr_t>(type), Type::NUMBER_OF_TYPES);
+#endif
+ return static_cast<Type>(type);
}
// Check if a marker is a stack frame type marker or a tagged pointer.
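MarkerToType above simply undoes the Smi tagging that TypeToMarker applies when a frame type is pushed as a marker. A standalone round-trip sketch, assuming the usual kSmiTag == 0 and kSmiTagSize == 1:

#include <cstdint>

constexpr intptr_t kSmiTag = 0;
constexpr intptr_t kSmiTagSize = 1;

constexpr intptr_t TypeToMarker(intptr_t type) {
  return (type << kSmiTagSize) | kSmiTag;
}

constexpr intptr_t MarkerToType(intptr_t marker) {
  return marker >> kSmiTagSize;
}

int main() {
  constexpr intptr_t kSomeFrameType = 3;  // hypothetical type id
  static_assert(MarkerToType(TypeToMarker(kSomeFrameType)) == kSomeFrameType,
                "frame type round-trips through the tagged marker");
  return 0;
}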
@@ -172,10 +175,7 @@ class StackFrame {
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_interpreted() const { return type() == INTERPRETED; }
bool is_wasm_compiled() const { return type() == WASM_COMPILED; }
- bool is_wasm_exit() const { return type() == WASM_EXIT; }
bool is_wasm_compile_lazy() const { return type() == WASM_COMPILE_LAZY; }
- bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
- bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
bool is_wasm_interpreter_entry() const {
return type() == WASM_INTERPRETER_ENTRY;
}
@@ -234,7 +234,7 @@ class StackFrame {
}
// Get the id of this stack frame.
- Id id() const { return static_cast<Id>(caller_sp()); }
+ StackFrameId id() const { return static_cast<StackFrameId>(caller_sp()); }
// Get the top handler from the current stack iterator.
inline StackHandler* top_handler() const;
@@ -1052,6 +1052,7 @@ class CWasmEntryFrame : public StubFrame {
private:
friend class StackFrameIteratorBase;
+ Type GetCallerState(State* state) const override;
};
class WasmCompileLazyFrame : public StandardFrame {
@@ -1259,7 +1260,7 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
public:
explicit StackTraceFrameIterator(Isolate* isolate);
// Skip frames until the frame with the given id is reached.
- StackTraceFrameIterator(Isolate* isolate, StackFrame::Id id);
+ StackTraceFrameIterator(Isolate* isolate, StackFrameId id);
bool done() const { return iterator_.done(); }
void Advance();
void AdvanceOneFrame() { iterator_.Advance(); }
diff --git a/deps/v8/src/execution/ia32/frame-constants-ia32.cc b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
index e5e3855c79..7faecdb858 100644
--- a/deps/v8/src/execution/ia32/frame-constants-ia32.cc
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
@@ -4,12 +4,12 @@
#if V8_TARGET_ARCH_IA32
+#include "src/execution/ia32/frame-constants-ia32.h"
+
#include "src/codegen/assembler.h"
#include "src/codegen/ia32/assembler-ia32-inl.h"
-#include "src/codegen/ia32/assembler-ia32.h"
#include "src/execution/frame-constants.h"
-
-#include "src/execution/ia32/frame-constants-ia32.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/interrupts-scope.cc b/deps/v8/src/execution/interrupts-scope.cc
new file mode 100644
index 0000000000..cf8611f8d6
--- /dev/null
+++ b/deps/v8/src/execution/interrupts-scope.cc
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/interrupts-scope.h"
+
+#include "src/execution/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+InterruptsScope::InterruptsScope(Isolate* isolate, int intercept_mask,
+ Mode mode)
+ : stack_guard_(isolate->stack_guard()),
+ intercept_mask_(intercept_mask),
+ intercepted_flags_(0),
+ mode_(mode) {
+ if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
+}
+
+bool InterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
+ InterruptsScope* last_postpone_scope = nullptr;
+ for (InterruptsScope* current = this; current; current = current->prev_) {
+ // We only consider scopes related to passed flag.
+ if (!(current->intercept_mask_ & flag)) continue;
+ if (current->mode_ == kRunInterrupts) {
+ // If innermost scope is kRunInterrupts scope, prevent interrupt from
+ // being intercepted.
+ break;
+ } else {
+ DCHECK_EQ(current->mode_, kPostponeInterrupts);
+ last_postpone_scope = current;
+ }
+ }
+ // If there is no postpone scope for the passed flag, we should not intercept.
+ if (!last_postpone_scope) return false;
+ last_postpone_scope->intercepted_flags_ |= flag;
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
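InterruptsScope::Intercept walks the scope chain from the innermost scope outwards: an inner kRunInterrupts scope keeps the interrupt live, otherwise the outermost matching postpone scope records it. A standalone model of that walk, using simplified stand-in types:

#include <cassert>

enum Mode { kPostponeInterrupts, kRunInterrupts };

struct Scope {
  int intercept_mask;
  int intercepted_flags;
  Mode mode;
  Scope* prev;
};

bool Intercept(Scope* innermost, int flag) {
  Scope* last_postpone = nullptr;
  for (Scope* s = innermost; s; s = s->prev) {
    if (!(s->intercept_mask & flag)) continue;
    if (s->mode == kRunInterrupts) break;  // interrupt must run
    last_postpone = s;                     // remember outermost postpone scope
  }
  if (!last_postpone) return false;
  last_postpone->intercepted_flags |= flag;
  return true;
}

int main() {
  Scope outer{1 << 0, 0, kPostponeInterrupts, nullptr};
  Scope inner{1 << 0, 0, kRunInterrupts, &outer};
  assert(!Intercept(&inner, 1 << 0));  // run-interrupts scope wins
  assert(Intercept(&outer, 1 << 0));   // postponed and recorded on `outer`
  assert(outer.intercepted_flags == (1 << 0));
  return 0;
}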
diff --git a/deps/v8/src/execution/interrupts-scope.h b/deps/v8/src/execution/interrupts-scope.h
new file mode 100644
index 0000000000..3d74850a84
--- /dev/null
+++ b/deps/v8/src/execution/interrupts-scope.h
@@ -0,0 +1,72 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_INTERRUPTS_SCOPE_H_
+#define V8_EXECUTION_INTERRUPTS_SCOPE_H_
+
+#include "src/execution/stack-guard.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// A scope only intercepts interrupts that are part of its intercept_mask_ and
+// does not affect other interrupts.
+class InterruptsScope {
+ public:
+ enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
+
+ V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, int intercept_mask,
+ Mode mode);
+
+ virtual ~InterruptsScope() {
+ if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
+ }
+
+ // Find the scope that intercepts this interrupt; it may be the outermost
+ // PostponeInterruptsScope or the innermost SafeForInterruptsScope, if any.
+ // Returns whether the interrupt has been intercepted.
+ bool Intercept(StackGuard::InterruptFlag flag);
+
+ private:
+ StackGuard* stack_guard_;
+ int intercept_mask_;
+ int intercepted_flags_;
+ Mode mode_;
+ InterruptsScope* prev_;
+
+ friend class StackGuard;
+};
+
+// Support for temporarily postponing interrupts. When the outermost postpone
+// scope is left, interrupts are re-enabled and any interrupts that occurred
+// while in the scope are taken into account.
+class PostponeInterruptsScope : public InterruptsScope {
+ public:
+ PostponeInterruptsScope(Isolate* isolate,
+ int intercept_mask = StackGuard::ALL_INTERRUPTS)
+ : InterruptsScope(isolate, intercept_mask,
+ InterruptsScope::kPostponeInterrupts) {}
+ ~PostponeInterruptsScope() override = default;
+};
+
+// Support for overriding PostponeInterruptsScope. An interrupt is not ignored
+// if the innermost scope is a SafeForInterruptsScope, regardless of any outer
+// PostponeInterruptsScopes.
+class SafeForInterruptsScope : public InterruptsScope {
+ public:
+ SafeForInterruptsScope(Isolate* isolate,
+ int intercept_mask = StackGuard::ALL_INTERRUPTS)
+ : InterruptsScope(isolate, intercept_mask,
+ InterruptsScope::kRunInterrupts) {}
+ ~SafeForInterruptsScope() override = default;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_INTERRUPTS_SCOPE_H_
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index d83ae708ec..adeb7f54d3 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -81,8 +81,10 @@ class IsolateData final {
// The FP and PC that are saved right before TurboAssembler::CallCFunction.
Address* fast_c_call_caller_fp_address() { return &fast_c_call_caller_fp_; }
Address* fast_c_call_caller_pc_address() { return &fast_c_call_caller_pc_; }
+ uint8_t* stack_is_iterable_address() { return &stack_is_iterable_; }
Address fast_c_call_caller_fp() { return fast_c_call_caller_fp_; }
Address fast_c_call_caller_pc() { return fast_c_call_caller_pc_; }
+ uint8_t stack_is_iterable() { return stack_is_iterable_; }
// Returns true if this address points to data stored in this instance.
// If it's the case then the value can be accessed indirectly through the
@@ -121,6 +123,7 @@ class IsolateData final {
V(kVirtualCallTargetRegisterOffset, kSystemPointerSize) \
V(kFastCCallCallerFPOffset, kSystemPointerSize) \
V(kFastCCallCallerPCOffset, kSystemPointerSize) \
+ V(kStackIsIterableOffset, kUInt8Size) \
/* This padding aligns IsolateData size by 8 bytes. */ \
V(kPaddingOffset, \
8 + RoundUp<8>(static_cast<int>(kPaddingOffset)) - kPaddingOffset) \
@@ -172,6 +175,9 @@ class IsolateData final {
// instruction in compiled code.
Address fast_c_call_caller_fp_ = kNullAddress;
Address fast_c_call_caller_pc_ = kNullAddress;
+ // Whether the SafeStackFrameIterator can successfully iterate the current
+ // stack. Only valid values are 0 or 1.
+ uint8_t stack_is_iterable_ = 1;
// Ensure the size is 8-byte aligned in order to make alignment of the field
// following the IsolateData field predictable. This solves the issue with
@@ -219,6 +225,8 @@ void IsolateData::AssertPredictableLayout() {
kFastCCallCallerFPOffset);
STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
kFastCCallCallerPCOffset);
+ STATIC_ASSERT(offsetof(IsolateData, stack_is_iterable_) ==
+ kStackIsIterableOffset);
STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
}
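
The kStackIsIterableOffset entry and the matching STATIC_ASSERT follow the usual IsolateData pattern: one macro lists each field's size, offsets are derived from that list, and compile-time checks confirm the real struct layout agrees, so the constants can be used from builtins and assembly. A rough standalone sketch of the pattern, with made-up field names and sizes:

#include <cstddef>
#include <cstdint>

// One (name, size) list drives both the offset constants and the layout checks.
#define DATA_FIELDS(V)              \
  V(kCallerFpOffset, sizeof(void*)) \
  V(kCallerPcOffset, sizeof(void*)) \
  V(kStackIsIterableOffset, sizeof(uint8_t))

enum : int {
#define FIELD_OFFSET(Name, Size) Name, Name##End = Name + (Size)-1,
  DATA_FIELDS(FIELD_OFFSET)
#undef FIELD_OFFSET
};

struct Data {
  void* caller_fp;
  void* caller_pc;
  uint8_t stack_is_iterable;  // only 0 or 1 are valid values
};

static_assert(offsetof(Data, caller_fp) == kCallerFpOffset, "layout mismatch");
static_assert(offsetof(Data, caller_pc) == kCallerPcOffset, "layout mismatch");
static_assert(offsetof(Data, stack_is_iterable) == kStackIsIterableOffset,
              "layout mismatch");

int main() { return 0; }
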
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index fcbbed139c..7e037fb410 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -145,9 +145,10 @@ bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
Smi::ToInt(species_cell.value()) == kProtectorValid;
}
-bool Isolate::IsRegExpSpeciesLookupChainIntact() {
- PropertyCell species_cell =
- PropertyCell::cast(root(RootIndex::kRegExpSpeciesProtector));
+bool Isolate::IsRegExpSpeciesLookupChainIntact(
+ Handle<NativeContext> native_context) {
+ DCHECK_EQ(*native_context, this->raw_native_context());
+ PropertyCell species_cell = native_context->regexp_species_protector();
return species_cell.value().IsSmi() &&
Smi::ToInt(species_cell.value()) == kProtectorValid;
}
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
new file mode 100644
index 0000000000..6095970a31
--- /dev/null
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -0,0 +1,64 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_ISOLATE_UTILS_INL_H_
+#define V8_EXECUTION_ISOLATE_UTILS_INL_H_
+
+#include "src/execution/isolate-utils.h"
+
+#include "src/common/ptr-compr-inl.h"
+#include "src/execution/isolate.h"
+#include "src/heap/heap-write-barrier-inl.h"
+
+namespace v8 {
+namespace internal {
+
+inline Isolate* GetIsolateForPtrCompr(HeapObject object) {
+#ifdef V8_COMPRESS_POINTERS
+ return Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+#else
+ return nullptr;
+#endif // V8_COMPRESS_POINTERS
+}
+
+V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
+#ifdef V8_COMPRESS_POINTERS
+ return GetIsolateFromWritableObject(object)->heap();
+#else
+ heap_internals::MemoryChunk* chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ return chunk->GetHeap();
+#endif // V8_COMPRESS_POINTERS
+}
+
+V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
+#ifdef V8_COMPRESS_POINTERS
+ Isolate* isolate = Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+ DCHECK_NOT_NULL(isolate);
+ return isolate;
+#else
+ return Isolate::FromHeap(GetHeapFromWritableObject(object));
+#endif // V8_COMPRESS_POINTERS
+}
+
+V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate) {
+#ifdef V8_COMPRESS_POINTERS
+ *isolate = GetIsolateFromWritableObject(object);
+ return true;
+#else
+ heap_internals::MemoryChunk* chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ if (chunk->InReadOnlySpace()) {
+ *isolate = nullptr;
+ return false;
+ }
+ *isolate = Isolate::FromHeap(chunk->GetHeap());
+ return true;
+#endif // V8_COMPRESS_POINTERS
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_ISOLATE_UTILS_INL_H_
diff --git a/deps/v8/src/execution/isolate-utils.h b/deps/v8/src/execution/isolate-utils.h
new file mode 100644
index 0000000000..31c154e7a4
--- /dev/null
+++ b/deps/v8/src/execution/isolate-utils.h
@@ -0,0 +1,31 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_ISOLATE_UTILS_H_
+#define V8_EXECUTION_ISOLATE_UTILS_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Computes the isolate from any read-only or writable heap object. The
+// resulting value is intended to be used only as a hoisted computation of the
+// isolate root inside trivial accessors, for optimizing value decompression.
+// When pointer compression is disabled this function always returns nullptr.
+V8_INLINE Isolate* GetIsolateForPtrCompr(HeapObject object);
+
+V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object);
+
+V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object);
+
+// Returns true if it succeeded in obtaining the isolate from the given object.
+// If it fails, the object is definitely a read-only object, but the call may
+// also succeed for read-only objects when pointer compression is enabled.
+V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_ISOLATE_UTILS_H_
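
Both new isolate-utils headers lean on the pointer-compression cage: when V8_COMPRESS_POINTERS is enabled, every heap object lives inside one reservation, so the Isolate can be recovered by masking the object's address (GetIsolateRoot) and subtracting the root bias (Isolate::FromRoot, where the bias comes from the IsolateData offset). A toy model of that arithmetic, assuming a 64-bit build and purely illustrative constants for the cage size and bias:

#include <cstdint>
#include <cstdio>

// Illustrative constants only -- the real cage size and root bias are
// V8-internal details derived from IsolateData.
constexpr uintptr_t kCageSize = uintptr_t{1} << 32;  // assumed 4 GiB cage
constexpr uintptr_t kCageMask = ~(kCageSize - 1);
constexpr uintptr_t kIsolateRootBias = 0;

// Mask the object's address down to the start of its cage (the isolate root),
// then subtract the bias to get back to the Isolate itself.
uintptr_t GetIsolateRoot(uintptr_t object_ptr) { return object_ptr & kCageMask; }
uintptr_t IsolateFromRoot(uintptr_t isolate_root) {
  return isolate_root - kIsolateRootBias;
}

int main() {
  uintptr_t cage_base = kCageSize * 3;    // pretend this cage was reserved
  uintptr_t object = cage_base + 0x1234;  // a tagged pointer inside the cage
  std::printf("isolate root: 0x%llx\n",
              static_cast<unsigned long long>(GetIsolateRoot(object)));
  std::printf("isolate:      0x%llx\n",
              static_cast<unsigned long long>(
                  IsolateFromRoot(GetIsolateRoot(object))));
  return 0;
}
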
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 8a8db12ca3..2b3551cdfb 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -736,15 +736,19 @@ class FrameArrayBuilder {
}
// Creates a StackTraceFrame object for each frame in the FrameArray.
- Handle<FixedArray> GetElementsAsStackTraceFrameArray() {
+ Handle<FixedArray> GetElementsAsStackTraceFrameArray(
+ bool enable_frame_caching) {
elements_->ShrinkToFit(isolate_);
const int frame_count = elements_->FrameCount();
Handle<FixedArray> stack_trace =
isolate_->factory()->NewFixedArray(frame_count);
for (int i = 0; i < frame_count; ++i) {
- // Caching stack frames only happens for non-Wasm frames.
- if (!elements_->IsAnyWasmFrame(i)) {
+ // Caching stack frames only happens for user JS frames.
+ const bool cache_frame =
+ enable_frame_caching && !elements_->IsAnyWasmFrame(i) &&
+ elements_->Function(i).shared().IsUserJavaScript();
+ if (cache_frame) {
MaybeHandle<StackTraceFrame> maybe_frame =
StackFrameCacheHelper::LookupCachedFrame(
isolate_, handle(elements_->Code(i), isolate_),
@@ -760,7 +764,7 @@ class FrameArrayBuilder {
isolate_->factory()->NewStackTraceFrame(elements_, i);
stack_trace->set(i, *frame);
- if (!elements_->IsAnyWasmFrame(i)) {
+ if (cache_frame) {
StackFrameCacheHelper::CacheFrameAndUpdateCache(
isolate_, handle(elements_->Code(i), isolate_),
Smi::ToInt(elements_->Offset(i)), frame);
@@ -938,6 +942,14 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
PromiseCapability::cast(context->get(index)), isolate);
if (!capability->promise().IsJSPromise()) return;
promise = handle(JSPromise::cast(capability->promise()), isolate);
+ } else if (IsBuiltinFunction(isolate, reaction->fulfill_handler(),
+ Builtins::kPromiseCapabilityDefaultResolve)) {
+ Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()),
+ isolate);
+ Handle<Context> context(function->context(), isolate);
+ promise =
+ handle(JSPromise::cast(context->get(PromiseBuiltins::kPromiseSlot)),
+ isolate);
} else {
// We have some generic promise chain here, so try to
// continue with the chained promise on the reaction
@@ -973,9 +985,7 @@ struct CaptureStackTraceOptions {
bool capture_builtin_exit_frames;
bool capture_only_frames_subject_to_debugging;
bool async_stack_trace;
-
- enum CaptureResult { RAW_FRAME_ARRAY, STACK_TRACE_FRAME_ARRAY };
- CaptureResult capture_result;
+ bool enable_frame_caching;
};
Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
@@ -1105,10 +1115,8 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
}
// TODO(yangguo): Queue this structured stack trace for preprocessing on GC.
- if (options.capture_result == CaptureStackTraceOptions::RAW_FRAME_ARRAY) {
- return builder.GetElements();
- }
- return builder.GetElementsAsStackTraceFrameArray();
+ return builder.GetElementsAsStackTraceFrameArray(
+ options.enable_frame_caching);
}
} // namespace
@@ -1126,7 +1134,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
options.async_stack_trace = FLAG_async_stack_traces;
options.filter_mode = FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
options.capture_only_frames_subject_to_debugging = false;
- options.capture_result = CaptureStackTraceOptions::RAW_FRAME_ARRAY;
+ options.enable_frame_caching = false;
return CaptureStackTrace(this, caller, options);
}
@@ -1222,7 +1230,7 @@ Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
? FrameArrayBuilder::ALL
: FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
options.capture_only_frames_subject_to_debugging = true;
- options.capture_result = CaptureStackTraceOptions::STACK_TRACE_FRAME_ARRAY;
+ options.enable_frame_caching = true;
return Handle<FixedArray>::cast(
CaptureStackTrace(this, factory()->undefined_value(), options));
@@ -1377,7 +1385,8 @@ Object Isolate::StackOverflow() {
Handle<Object> exception;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
this, exception,
- ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller, true));
+ ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller,
+ ErrorUtils::StackTraceCollection::kSimple));
Throw(*exception, nullptr);
@@ -1621,7 +1630,12 @@ Object Isolate::UnwindAndFindHandler() {
thread_local_top()->pending_handler_fp_ = handler_fp;
thread_local_top()->pending_handler_sp_ = handler_sp;
- // Return and clear pending exception.
+ // Return and clear pending exception. The contract is that:
+ // (1) the pending exception is stored in one place (no duplication), and
+ // (2) within generated-code land, that one place is the return register.
+ // If/when we unwind back into C++ (returning to the JSEntry stub,
+ // or to Execution::CallWasm), the returned exception will be sent
+ // back to isolate->set_pending_exception(...).
clear_pending_exception();
return exception;
};
@@ -1656,6 +1670,19 @@ Object Isolate::UnwindAndFindHandler() {
0);
}
+ case StackFrame::C_WASM_ENTRY: {
+ StackHandler* handler = frame->top_handler();
+ thread_local_top()->handler_ = handler->next_address();
+ Code code = frame->LookupCode();
+ HandlerTable table(code);
+ Address instruction_start = code.InstructionStart();
+ int return_offset = static_cast<int>(frame->pc() - instruction_start);
+ int handler_offset = table.LookupReturn(return_offset);
+ DCHECK_NE(-1, handler_offset);
+ return FoundHandler(Context(), instruction_start, handler_offset,
+ code.constant_pool(), frame->sp(), frame->fp());
+ }
+
case StackFrame::WASM_COMPILED: {
if (trap_handler::IsThreadInWasm()) {
trap_handler::ClearThreadInWasm();
@@ -2014,33 +2041,23 @@ Object Isolate::PromoteScheduledException() {
}
void Isolate::PrintCurrentStackTrace(FILE* out) {
- IncrementalStringBuilder builder(this);
- for (StackTraceFrameIterator it(this); !it.done(); it.Advance()) {
- if (!it.is_javascript()) continue;
+ CaptureStackTraceOptions options;
+ options.limit = 0;
+ options.skip_mode = SKIP_NONE;
+ options.capture_builtin_exit_frames = true;
+ options.async_stack_trace = FLAG_async_stack_traces;
+ options.filter_mode = FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
+ options.capture_only_frames_subject_to_debugging = false;
+ options.enable_frame_caching = false;
- HandleScope scope(this);
- JavaScriptFrame* frame = it.javascript_frame();
-
- Handle<Object> receiver(frame->receiver(), this);
- Handle<JSFunction> function(frame->function(), this);
- Handle<AbstractCode> code;
- int offset;
- if (frame->is_interpreted()) {
- InterpretedFrame* interpreted_frame = InterpretedFrame::cast(frame);
- code = handle(AbstractCode::cast(interpreted_frame->GetBytecodeArray()),
- this);
- offset = interpreted_frame->GetBytecodeOffset();
- } else {
- code = handle(AbstractCode::cast(frame->LookupCode()), this);
- offset = static_cast<int>(frame->pc() - code->InstructionStart());
- }
+ Handle<FixedArray> frames = Handle<FixedArray>::cast(
+ CaptureStackTrace(this, this->factory()->undefined_value(), options));
- // To preserve backwards compatiblity, only append a newline when
- // the current stringified frame actually has characters.
- const int old_length = builder.Length();
- JSStackFrame site(this, receiver, function, code, offset);
- site.ToString(builder);
- if (old_length != builder.Length()) builder.AppendCharacter('\n');
+ IncrementalStringBuilder builder(this);
+ for (int i = 0; i < frames->length(); ++i) {
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(frames->get(i)), this);
+
+ SerializeStackTraceFrame(this, frame, builder);
}
Handle<String> stack_trace = builder.Finish().ToHandleChecked();
@@ -2113,7 +2130,8 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
JSReceiver::GetDataProperty(Handle<JSObject>::cast(exception), key);
if (!property->IsFixedArray()) return false;
- Handle<FrameArray> elements = Handle<FrameArray>::cast(property);
+ Handle<FrameArray> elements =
+ GetFrameArrayFromStackTrace(this, Handle<FixedArray>::cast(property));
const int frame_count = elements->FrameCount();
for (int i = 0; i < frame_count; i++) {
@@ -2248,7 +2266,7 @@ bool Isolate::IsExternalHandlerOnTop(Object exception) {
}
void Isolate::ReportPendingMessagesImpl(bool report_externally) {
- Object exception = pending_exception();
+ Object exception_obj = pending_exception();
// Clear the pending message object early to avoid endless recursion.
Object message_obj = thread_local_top()->pending_message_obj_;
@@ -2256,7 +2274,7 @@ void Isolate::ReportPendingMessagesImpl(bool report_externally) {
// For uncatchable exceptions we do nothing. If needed, the exception and the
// message have already been propagated to v8::TryCatch.
- if (!is_catchable_by_javascript(exception)) return;
+ if (!is_catchable_by_javascript(exception_obj)) return;
// Determine whether the message needs to be reported to all message handlers
// depending on whether and external v8::TryCatch or an internal JavaScript
@@ -2267,19 +2285,20 @@ void Isolate::ReportPendingMessagesImpl(bool report_externally) {
should_report_exception = try_catch_handler()->is_verbose_;
} else {
// Report the exception if it isn't caught by JavaScript code.
- should_report_exception = !IsJavaScriptHandlerOnTop(exception);
+ should_report_exception = !IsJavaScriptHandlerOnTop(exception_obj);
}
// Actually report the pending message to all message handlers.
if (!message_obj.IsTheHole(this) && should_report_exception) {
HandleScope scope(this);
Handle<JSMessageObject> message(JSMessageObject::cast(message_obj), this);
+ Handle<Object> exception(exception_obj, this);
Handle<Script> script(message->script(), this);
// Clear the exception and restore it afterwards, otherwise
// CollectSourcePositions will abort.
clear_pending_exception();
JSMessageObject::EnsureSourcePositionsAvailable(this, message);
- set_pending_exception(exception);
+ set_pending_exception(*exception);
int start_pos = message->GetStartPosition();
int end_pos = message->GetEndPosition();
MessageLocation location(script, start_pos, end_pos);
@@ -2853,6 +2872,13 @@ void Isolate::Delete(Isolate* isolate) {
SetIsolateThreadLocals(saved_isolate, saved_data);
}
+void Isolate::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
+ DCHECK_NOT_NULL(ro_heap);
+ DCHECK_IMPLIES(read_only_heap_ != nullptr, read_only_heap_ == ro_heap);
+ read_only_heap_ = ro_heap;
+ heap_.SetUpFromReadOnlyHeap(ro_heap);
+}
+
v8::PageAllocator* Isolate::page_allocator() {
return isolate_allocator_->page_allocator();
}
@@ -3282,6 +3308,21 @@ bool Isolate::InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
return Init(read_only_deserializer, startup_deserializer);
}
+static void AddCrashKeysForIsolateAndHeapPointers(Isolate* isolate) {
+ v8::Platform* platform = V8::GetCurrentPlatform();
+
+ const int id = isolate->id();
+ platform->AddCrashKey(id, "isolate", reinterpret_cast<uintptr_t>(isolate));
+
+ auto heap = isolate->heap();
+ platform->AddCrashKey(id, "ro_space",
+ reinterpret_cast<uintptr_t>(heap->read_only_space()->first_page()));
+ platform->AddCrashKey(id, "map_space",
+ reinterpret_cast<uintptr_t>(heap->map_space()->first_page()));
+ platform->AddCrashKey(id, "code_space",
+ reinterpret_cast<uintptr_t>(heap->code_space()->first_page()));
+}
+
bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
StartupDeserializer* startup_deserializer) {
TRACE_ISOLATE(init);
@@ -3432,7 +3473,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
if (create_heap_objects) {
heap_.read_only_space()->ClearStringPaddingIfNeeded();
- heap_.read_only_heap()->OnCreateHeapObjectsComplete(this);
+ read_only_heap_->OnCreateHeapObjectsComplete(this);
} else {
startup_deserializer->DeserializeInto(this);
}
@@ -3527,6 +3568,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
}
+ AddCrashKeysForIsolateAndHeapPointers(this);
return true;
}
@@ -3693,9 +3735,9 @@ void Isolate::MaybeInitializeVectorListFromHeap() {
std::vector<Handle<FeedbackVector>> vectors;
{
- HeapIterator heap_iterator(heap());
- for (HeapObject current_obj = heap_iterator.next(); !current_obj.is_null();
- current_obj = heap_iterator.next()) {
+ HeapObjectIterator heap_iterator(heap());
+ for (HeapObject current_obj = heap_iterator.Next(); !current_obj.is_null();
+ current_obj = heap_iterator.Next()) {
if (!current_obj.IsFeedbackVector()) continue;
FeedbackVector vector = FeedbackVector::cast(current_obj);
@@ -3907,13 +3949,31 @@ void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
if (!IsNoElementsProtectorIntact()) return;
if (!IsArrayOrObjectOrStringPrototype(*object)) return;
PropertyCell::SetValueWithInvalidation(
- this, factory()->no_elements_protector(),
+ this, "no_elements_protector", factory()->no_elements_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
}
+void Isolate::TraceProtectorInvalidation(const char* protector_name) {
+ static constexpr char kInvalidateProtectorTracingCategory[] =
+ "V8.InvalidateProtector";
+ static constexpr char kInvalidateProtectorTracingArg[] = "protector-name";
+
+ DCHECK(FLAG_trace_protector_invalidation);
+
+ // TODO(jgruber): Remove the PrintF once tracing can output to stdout.
+ i::PrintF("Invalidating protector cell %s in isolate %p\n", protector_name,
+ this);
+ TRACE_EVENT_INSTANT1("v8", kInvalidateProtectorTracingCategory,
+ TRACE_EVENT_SCOPE_THREAD, kInvalidateProtectorTracingArg,
+ protector_name);
+}
+
void Isolate::InvalidateIsConcatSpreadableProtector() {
DCHECK(factory()->is_concat_spreadable_protector()->value().IsSmi());
DCHECK(IsIsConcatSpreadableLookupChainIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("is_concat_spreadable_protector");
+ }
factory()->is_concat_spreadable_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsIsConcatSpreadableLookupChainIntact());
@@ -3922,6 +3982,9 @@ void Isolate::InvalidateIsConcatSpreadableProtector() {
void Isolate::InvalidateArrayConstructorProtector() {
DCHECK(factory()->array_constructor_protector()->value().IsSmi());
DCHECK(IsArrayConstructorIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("array_constructor_protector");
+ }
factory()->array_constructor_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsArrayConstructorIntact());
@@ -3931,7 +3994,7 @@ void Isolate::InvalidateArraySpeciesProtector() {
DCHECK(factory()->array_species_protector()->value().IsSmi());
DCHECK(IsArraySpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->array_species_protector(),
+ this, "array_species_protector", factory()->array_species_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArraySpeciesLookupChainIntact());
}
@@ -3940,25 +4003,30 @@ void Isolate::InvalidateTypedArraySpeciesProtector() {
DCHECK(factory()->typed_array_species_protector()->value().IsSmi());
DCHECK(IsTypedArraySpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->typed_array_species_protector(),
+ this, "typed_array_species_protector",
+ factory()->typed_array_species_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsTypedArraySpeciesLookupChainIntact());
}
-void Isolate::InvalidateRegExpSpeciesProtector() {
- DCHECK(factory()->regexp_species_protector()->value().IsSmi());
- DCHECK(IsRegExpSpeciesLookupChainIntact());
+void Isolate::InvalidateRegExpSpeciesProtector(
+ Handle<NativeContext> native_context) {
+ DCHECK_EQ(*native_context, this->raw_native_context());
+ DCHECK(native_context->regexp_species_protector().value().IsSmi());
+ DCHECK(IsRegExpSpeciesLookupChainIntact(native_context));
+ Handle<PropertyCell> species_cell(native_context->regexp_species_protector(),
+ this);
PropertyCell::SetValueWithInvalidation(
- this, factory()->regexp_species_protector(),
+ this, "regexp_species_protector", species_cell,
handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsRegExpSpeciesLookupChainIntact());
+ DCHECK(!IsRegExpSpeciesLookupChainIntact(native_context));
}
void Isolate::InvalidatePromiseSpeciesProtector() {
DCHECK(factory()->promise_species_protector()->value().IsSmi());
DCHECK(IsPromiseSpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->promise_species_protector(),
+ this, "promise_species_protector", factory()->promise_species_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseSpeciesLookupChainIntact());
}
@@ -3966,6 +4034,9 @@ void Isolate::InvalidatePromiseSpeciesProtector() {
void Isolate::InvalidateStringLengthOverflowProtector() {
DCHECK(factory()->string_length_protector()->value().IsSmi());
DCHECK(IsStringLengthOverflowIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("string_length_protector");
+ }
factory()->string_length_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsStringLengthOverflowIntact());
@@ -3975,7 +4046,7 @@ void Isolate::InvalidateArrayIteratorProtector() {
DCHECK(factory()->array_iterator_protector()->value().IsSmi());
DCHECK(IsArrayIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->array_iterator_protector(),
+ this, "array_iterator_protector", factory()->array_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArrayIteratorLookupChainIntact());
}
@@ -3984,7 +4055,7 @@ void Isolate::InvalidateMapIteratorProtector() {
DCHECK(factory()->map_iterator_protector()->value().IsSmi());
DCHECK(IsMapIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->map_iterator_protector(),
+ this, "map_iterator_protector", factory()->map_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsMapIteratorLookupChainIntact());
}
@@ -3993,7 +4064,7 @@ void Isolate::InvalidateSetIteratorProtector() {
DCHECK(factory()->set_iterator_protector()->value().IsSmi());
DCHECK(IsSetIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->set_iterator_protector(),
+ this, "set_iterator_protector", factory()->set_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsSetIteratorLookupChainIntact());
}
@@ -4002,7 +4073,7 @@ void Isolate::InvalidateStringIteratorProtector() {
DCHECK(factory()->string_iterator_protector()->value().IsSmi());
DCHECK(IsStringIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->string_iterator_protector(),
+ this, "string_iterator_protector", factory()->string_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsStringIteratorLookupChainIntact());
}
@@ -4011,7 +4082,8 @@ void Isolate::InvalidateArrayBufferDetachingProtector() {
DCHECK(factory()->array_buffer_detaching_protector()->value().IsSmi());
DCHECK(IsArrayBufferDetachingIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->array_buffer_detaching_protector(),
+ this, "array_buffer_detaching_protector",
+ factory()->array_buffer_detaching_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArrayBufferDetachingIntact());
}
@@ -4020,7 +4092,7 @@ void Isolate::InvalidatePromiseHookProtector() {
DCHECK(factory()->promise_hook_protector()->value().IsSmi());
DCHECK(IsPromiseHookProtectorIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->promise_hook_protector(),
+ this, "promise_hook_protector", factory()->promise_hook_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseHookProtectorIntact());
}
@@ -4028,6 +4100,9 @@ void Isolate::InvalidatePromiseHookProtector() {
void Isolate::InvalidatePromiseResolveProtector() {
DCHECK(factory()->promise_resolve_protector()->value().IsSmi());
DCHECK(IsPromiseResolveLookupChainIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("promise_resolve_protector");
+ }
factory()->promise_resolve_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsPromiseResolveLookupChainIntact());
@@ -4037,7 +4112,7 @@ void Isolate::InvalidatePromiseThenProtector() {
DCHECK(factory()->promise_then_protector()->value().IsSmi());
DCHECK(IsPromiseThenLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->promise_then_protector(),
+ this, "promise_then_protector", factory()->promise_then_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseThenLookupChainIntact());
}
@@ -4176,7 +4251,7 @@ void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
// set is still open (whether to clear it after every microtask or once
// during a microtask checkpoint). See also
// https://github.com/tc39/proposal-weakrefs/issues/39 .
- heap()->ClearKeepDuringJobSet();
+ heap()->ClearKeptObjects();
}
if (call_completed_callbacks_.empty()) return;
@@ -4261,7 +4336,7 @@ void Isolate::SetHostImportModuleDynamicallyCallback(
}
Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
- Handle<Module> module) {
+ Handle<SourceTextModule> module) {
Handle<Object> host_meta(module->import_meta(), this);
if (host_meta->IsTheHole(this)) {
host_meta = factory()->NewJSObjectWithNullProto();
@@ -4269,7 +4344,7 @@ Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
v8::Local<v8::Context> api_context =
v8::Utils::ToLocal(Handle<Context>(native_context()));
host_initialize_import_meta_object_callback_(
- api_context, Utils::ToLocal(module),
+ api_context, Utils::ToLocal(Handle<Module>::cast(module)),
v8::Local<v8::Object>::Cast(v8::Utils::ToLocal(host_meta)));
}
module->set_import_meta(*host_meta);
@@ -4641,26 +4716,6 @@ AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
: isolate_(isolate), context_(isolate->context(), isolate) {}
#endif // DEBUG
-bool InterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
- InterruptsScope* last_postpone_scope = nullptr;
- for (InterruptsScope* current = this; current; current = current->prev_) {
- // We only consider scopes related to passed flag.
- if (!(current->intercept_mask_ & flag)) continue;
- if (current->mode_ == kRunInterrupts) {
- // If innermost scope is kRunInterrupts scope, prevent interrupt from
- // being intercepted.
- break;
- } else {
- DCHECK_EQ(current->mode_, kPostponeInterrupts);
- last_postpone_scope = current;
- }
- }
- // If there is no postpone scope for passed flag then we should not intercept.
- if (!last_postpone_scope) return false;
- last_postpone_scope->intercepted_flags_ |= flag;
- return true;
-}
-
#undef TRACE_ISOLATE
} // namespace internal
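
A large share of the isolate.cc churn is the same protector change repeated: every invalidation now passes a statically allocated name so TraceProtectorInvalidation can report it when FLAG_trace_protector_invalidation is set, either via PropertyCell::SetValueWithInvalidation or an explicit flag check before set_value. A standalone sketch of that trace-then-invalidate shape (a plain map stands in for the protector cells, and the sentinel values are assumptions):

#include <cstdio>
#include <string>
#include <unordered_map>

// Assumed sentinel values, mirroring kProtectorValid / kProtectorInvalid.
constexpr int kProtectorValid = 1;
constexpr int kProtectorInvalid = 0;

bool trace_protector_invalidation = true;  // stands in for the V8 flag

std::unordered_map<std::string, int> protector_cells = {
    {"array_species_protector", kProtectorValid},
    {"regexp_species_protector", kProtectorValid}};

void TraceProtectorInvalidation(const std::string& name) {
  std::printf("Invalidating protector cell %s\n", name.c_str());
}

// Trace by name (when the flag is on), then flip the cell exactly once.
void InvalidateProtector(const std::string& name) {
  int& cell = protector_cells.at(name);
  if (cell != kProtectorValid) return;  // already invalidated
  if (trace_protector_invalidation) TraceProtectorInvalidation(name);
  cell = kProtectorInvalid;
}

int main() {
  InvalidateProtector("array_species_protector");
  InvalidateProtector("array_species_protector");  // second call is a no-op
  return 0;
}
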
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 4b4bf9cd7c..2ead7bf844 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -23,6 +23,7 @@
#include "src/execution/futex-emulation.h"
#include "src/execution/isolate-data.h"
#include "src/execution/messages.h"
+#include "src/execution/stack-guard.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
@@ -69,7 +70,6 @@ class CodeTracer;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
-class ContextSlotCache;
class Counters;
class Debug;
class DeoptimizerData;
@@ -91,8 +91,8 @@ class RootVisitor;
class RuntimeProfiler;
class SetupIsolateDelegate;
class Simulator;
-class StartupDeserializer;
class StandardFrame;
+class StartupDeserializer;
class StubCache;
class ThreadManager;
class ThreadState;
@@ -397,6 +397,8 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(OOMErrorCallback, oom_behavior, nullptr) \
V(LogEventCallback, event_logger, nullptr) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
+ V(ModifyCodeGenerationFromStringsCallback, modify_code_gen_callback, \
+ nullptr) \
V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
V(ExtensionCallback, wasm_module_callback, &NoExtension) \
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
@@ -515,6 +517,8 @@ class Isolate final : private HiddenFactory {
// for legacy API reasons.
static void Delete(Isolate* isolate);
+ void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
+
// Returns allocation mode of this isolate.
V8_INLINE IsolateAllocationMode isolate_allocation_mode();
@@ -900,6 +904,7 @@ class Isolate final : private HiddenFactory {
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
+ ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
static Isolate* FromHeap(Heap* heap) {
return reinterpret_cast<Isolate*>(reinterpret_cast<Address>(heap) -
OFFSET_OF(Isolate, heap_));
@@ -915,6 +920,9 @@ class Isolate final : private HiddenFactory {
static size_t isolate_root_bias() {
return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
}
+ static Isolate* FromRoot(Address isolate_root) {
+ return reinterpret_cast<Isolate*>(isolate_root - isolate_root_bias());
+ }
RootsTable& roots_table() { return isolate_data()->roots(); }
@@ -1168,7 +1176,8 @@ class Isolate final : private HiddenFactory {
inline bool IsArraySpeciesLookupChainIntact();
inline bool IsTypedArraySpeciesLookupChainIntact();
- inline bool IsRegExpSpeciesLookupChainIntact();
+ inline bool IsRegExpSpeciesLookupChainIntact(
+ Handle<NativeContext> native_context);
// Check that the @@species protector is intact, which guards the lookup of
// "constructor" on JSPromise instances, whose [[Prototype]] is the initial
@@ -1250,10 +1259,14 @@ class Isolate final : private HiddenFactory {
void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
UpdateNoElementsProtectorOnSetElement(object);
}
+
+ // The `protector_name` C string must be statically allocated.
+ void TraceProtectorInvalidation(const char* protector_name);
+
void InvalidateArrayConstructorProtector();
void InvalidateArraySpeciesProtector();
void InvalidateTypedArraySpeciesProtector();
- void InvalidateRegExpSpeciesProtector();
+ void InvalidateRegExpSpeciesProtector(Handle<NativeContext> native_context);
void InvalidatePromiseSpeciesProtector();
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
@@ -1469,7 +1482,7 @@ class Isolate final : private HiddenFactory {
void SetHostInitializeImportMetaObjectCallback(
HostInitializeImportMetaObjectCallback callback);
V8_EXPORT_PRIVATE Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
- Handle<Module> module);
+ Handle<SourceTextModule> module);
void RegisterEmbeddedFileWriter(EmbeddedFileWriterInterface* writer) {
embedded_file_writer_ = writer;
@@ -1647,6 +1660,7 @@ class Isolate final : private HiddenFactory {
std::unique_ptr<IsolateAllocator> isolate_allocator_;
Heap heap_;
+ ReadOnlyHeap* read_only_heap_ = nullptr;
const int id_;
EntryStackItem* entry_stack_ = nullptr;
@@ -1982,65 +1996,6 @@ class StackLimitCheck {
} \
} while (false)
-// Scope intercepts only interrupt which is part of its interrupt_mask and does
-// not affect other interrupts.
-class InterruptsScope {
- public:
- enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
-
- virtual ~InterruptsScope() {
- if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
- }
-
- // Find the scope that intercepts this interrupt.
- // It may be outermost PostponeInterruptsScope or innermost
- // SafeForInterruptsScope if any.
- // Return whether the interrupt has been intercepted.
- bool Intercept(StackGuard::InterruptFlag flag);
-
- InterruptsScope(Isolate* isolate, int intercept_mask, Mode mode)
- : stack_guard_(isolate->stack_guard()),
- intercept_mask_(intercept_mask),
- intercepted_flags_(0),
- mode_(mode) {
- if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
- }
-
- private:
- StackGuard* stack_guard_;
- int intercept_mask_;
- int intercepted_flags_;
- Mode mode_;
- InterruptsScope* prev_;
-
- friend class StackGuard;
-};
-
-// Support for temporarily postponing interrupts. When the outermost
-// postpone scope is left the interrupts will be re-enabled and any
-// interrupts that occurred while in the scope will be taken into
-// account.
-class PostponeInterruptsScope : public InterruptsScope {
- public:
- PostponeInterruptsScope(Isolate* isolate,
- int intercept_mask = StackGuard::ALL_INTERRUPTS)
- : InterruptsScope(isolate, intercept_mask,
- InterruptsScope::kPostponeInterrupts) {}
- ~PostponeInterruptsScope() override = default;
-};
-
-// Support for overriding PostponeInterruptsScope. Interrupt is not ignored if
-// innermost scope is SafeForInterruptsScope ignoring any outer
-// PostponeInterruptsScopes.
-class SafeForInterruptsScope : public InterruptsScope {
- public:
- SafeForInterruptsScope(Isolate* isolate,
- int intercept_mask = StackGuard::ALL_INTERRUPTS)
- : InterruptsScope(isolate, intercept_mask,
- InterruptsScope::kRunInterrupts) {}
- ~SafeForInterruptsScope() override = default;
-};
-
class StackTraceFailureMessage {
public:
explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index c76f546d62..d216d3bc39 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -7,13 +7,16 @@
#include <memory>
#include "src/api/api-inl.h"
+#include "src/base/v8-fallthrough.h"
#include "src/execution/execution.h"
+#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
#include "src/logging/counters.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/keys.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
#include "src/strings/string-builder-inl.h"
#include "src/wasm/wasm-code-manager.h"
@@ -303,7 +306,7 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
} // namespace
Handle<Object> StackFrameBase::GetEvalOrigin() {
- if (!HasScript()) return isolate_->factory()->undefined_value();
+ if (!HasScript() || !IsEval()) return isolate_->factory()->undefined_value();
return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
}
@@ -321,12 +324,6 @@ bool StackFrameBase::IsEval() {
GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
}
-MaybeHandle<String> StackFrameBase::ToString() {
- IncrementalStringBuilder builder(isolate_);
- ToString(builder);
- return builder.Finish();
-}
-
void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
int frame_ix) {
DCHECK(!array->IsWasmFrame(frame_ix));
@@ -365,7 +362,7 @@ Handle<Object> JSStackFrame::GetFileName() {
}
Handle<Object> JSStackFrame::GetFunctionName() {
- Handle<String> result = JSFunction::GetName(function_);
+ Handle<String> result = JSFunction::GetDebugName(function_);
if (result->length() != 0) return result;
if (HasScript() &&
@@ -514,177 +511,6 @@ bool JSStackFrame::IsToplevel() {
return receiver_->IsJSGlobalProxy() || receiver_->IsNullOrUndefined(isolate_);
}
-namespace {
-
-bool IsNonEmptyString(Handle<Object> object) {
- return (object->IsString() && String::cast(*object).length() > 0);
-}
-
-void AppendFileLocation(Isolate* isolate, StackFrameBase* call_site,
- IncrementalStringBuilder* builder) {
- if (call_site->IsNative()) {
- builder->AppendCString("native");
- return;
- }
-
- Handle<Object> file_name = call_site->GetScriptNameOrSourceUrl();
- if (!file_name->IsString() && call_site->IsEval()) {
- Handle<Object> eval_origin = call_site->GetEvalOrigin();
- DCHECK(eval_origin->IsString());
- builder->AppendString(Handle<String>::cast(eval_origin));
- builder->AppendCString(", "); // Expecting source position to follow.
- }
-
- if (IsNonEmptyString(file_name)) {
- builder->AppendString(Handle<String>::cast(file_name));
- } else {
- // Source code does not originate from a file and is not native, but we
- // can still get the source position inside the source string, e.g. in
- // an eval string.
- builder->AppendCString("<anonymous>");
- }
-
- int line_number = call_site->GetLineNumber();
- if (line_number != StackFrameBase::kNone) {
- builder->AppendCharacter(':');
- Handle<String> line_string = isolate->factory()->NumberToString(
- handle(Smi::FromInt(line_number), isolate), isolate);
- builder->AppendString(line_string);
-
- int column_number = call_site->GetColumnNumber();
- if (column_number != StackFrameBase::kNone) {
- builder->AppendCharacter(':');
- Handle<String> column_string = isolate->factory()->NumberToString(
- handle(Smi::FromInt(column_number), isolate), isolate);
- builder->AppendString(column_string);
- }
- }
-}
-
-int StringIndexOf(Isolate* isolate, Handle<String> subject,
- Handle<String> pattern) {
- if (pattern->length() > subject->length()) return -1;
- return String::IndexOf(isolate, subject, pattern, 0);
-}
-
-// Returns true iff
-// 1. the subject ends with '.' + pattern, or
-// 2. subject == pattern.
-bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
- Handle<String> pattern) {
- if (String::Equals(isolate, subject, pattern)) return true;
-
- FlatStringReader subject_reader(isolate, String::Flatten(isolate, subject));
- FlatStringReader pattern_reader(isolate, String::Flatten(isolate, pattern));
-
- int pattern_index = pattern_reader.length() - 1;
- int subject_index = subject_reader.length() - 1;
- for (int i = 0; i <= pattern_reader.length(); i++) { // Iterate over len + 1.
- if (subject_index < 0) {
- return false;
- }
-
- const uc32 subject_char = subject_reader.Get(subject_index);
- if (i == pattern_reader.length()) {
- if (subject_char != '.') return false;
- } else if (subject_char != pattern_reader.Get(pattern_index)) {
- return false;
- }
-
- pattern_index--;
- subject_index--;
- }
-
- return true;
-}
-
-void AppendMethodCall(Isolate* isolate, JSStackFrame* call_site,
- IncrementalStringBuilder* builder) {
- Handle<Object> type_name = call_site->GetTypeName();
- Handle<Object> method_name = call_site->GetMethodName();
- Handle<Object> function_name = call_site->GetFunctionName();
-
- if (IsNonEmptyString(function_name)) {
- Handle<String> function_string = Handle<String>::cast(function_name);
- if (IsNonEmptyString(type_name)) {
- Handle<String> type_string = Handle<String>::cast(type_name);
- bool starts_with_type_name =
- (StringIndexOf(isolate, function_string, type_string) == 0);
- if (!starts_with_type_name) {
- builder->AppendString(type_string);
- builder->AppendCharacter('.');
- }
- }
- builder->AppendString(function_string);
-
- if (IsNonEmptyString(method_name)) {
- Handle<String> method_string = Handle<String>::cast(method_name);
- if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
- builder->AppendCString(" [as ");
- builder->AppendString(method_string);
- builder->AppendCharacter(']');
- }
- }
- } else {
- if (IsNonEmptyString(type_name)) {
- builder->AppendString(Handle<String>::cast(type_name));
- builder->AppendCharacter('.');
- }
- if (IsNonEmptyString(method_name)) {
- builder->AppendString(Handle<String>::cast(method_name));
- } else {
- builder->AppendCString("<anonymous>");
- }
- }
-}
-
-} // namespace
-
-void JSStackFrame::ToString(IncrementalStringBuilder& builder) {
- Handle<Object> function_name = GetFunctionName();
-
- const bool is_toplevel = IsToplevel();
- const bool is_async = IsAsync();
- const bool is_promise_all = IsPromiseAll();
- const bool is_constructor = IsConstructor();
- const bool is_method_call = !(is_toplevel || is_constructor);
-
- if (is_async) {
- builder.AppendCString("async ");
- }
- if (is_promise_all) {
- // For `Promise.all(iterable)` frames we interpret the {offset_}
- // as the element index into `iterable` where the error occurred.
- builder.AppendCString("Promise.all (index ");
- Handle<String> index_string = isolate_->factory()->NumberToString(
- handle(Smi::FromInt(offset_), isolate_), isolate_);
- builder.AppendString(index_string);
- builder.AppendCString(")");
- return;
- }
- if (is_method_call) {
- AppendMethodCall(isolate_, this, &builder);
- } else if (is_constructor) {
- builder.AppendCString("new ");
- if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- } else {
- builder.AppendCString("<anonymous>");
- }
- } else if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- } else {
- AppendFileLocation(isolate_, this, &builder);
- return;
- }
-
- builder.AppendCString(" (");
- AppendFileLocation(isolate_, this, &builder);
- builder.AppendCString(")");
-
- return;
-}
-
int JSStackFrame::GetPosition() const {
Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
@@ -749,41 +575,6 @@ Handle<Object> WasmStackFrame::GetWasmModuleName() {
return module_name;
}
-void WasmStackFrame::ToString(IncrementalStringBuilder& builder) {
- Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
- isolate_);
- MaybeHandle<String> module_name =
- WasmModuleObject::GetModuleNameOrNull(isolate_, module_object);
- MaybeHandle<String> function_name = WasmModuleObject::GetFunctionNameOrNull(
- isolate_, module_object, wasm_func_index_);
- bool has_name = !module_name.is_null() || !function_name.is_null();
- if (has_name) {
- if (module_name.is_null()) {
- builder.AppendString(function_name.ToHandleChecked());
- } else {
- builder.AppendString(module_name.ToHandleChecked());
- if (!function_name.is_null()) {
- builder.AppendCString(".");
- builder.AppendString(function_name.ToHandleChecked());
- }
- }
- builder.AppendCString(" (");
- }
-
- builder.AppendCString("wasm-function[");
-
- char buffer[16];
- SNPrintF(ArrayVector(buffer), "%u]", wasm_func_index_);
- builder.AppendCString(buffer);
-
- SNPrintF(ArrayVector(buffer), ":%d", GetPosition());
- builder.AppendCString(buffer);
-
- if (has_name) builder.AppendCString(")");
-
- return;
-}
-
int WasmStackFrame::GetPosition() const {
return IsInterpreted()
? offset_
@@ -791,6 +582,14 @@ int WasmStackFrame::GetPosition() const {
code_, offset_);
}
+int WasmStackFrame::GetColumnNumber() { return GetModuleOffset(); }
+
+int WasmStackFrame::GetModuleOffset() const {
+ const int function_offset =
+ wasm_instance_->module_object().GetFunctionOffset(wasm_func_index_);
+ return function_offset + GetPosition();
+}
+
Handle<Object> WasmStackFrame::Null() const {
return isolate_->factory()->null_value();
}
@@ -858,24 +657,6 @@ int AsmJsWasmStackFrame::GetColumnNumber() {
return Script::GetColumnNumber(script, GetPosition()) + 1;
}
-void AsmJsWasmStackFrame::ToString(IncrementalStringBuilder& builder) {
- // The string should look exactly as the respective javascript frame string.
- // Keep this method in line to
- // JSStackFrame::ToString(IncrementalStringBuilder&).
- Handle<Object> function_name = GetFunctionName();
-
- if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- builder.AppendCString(" (");
- }
-
- AppendFileLocation(isolate_, this, &builder);
-
- if (IsNonEmptyString(function_name)) builder.AppendCString(")");
-
- return;
-}
-
FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
Handle<FrameArray> array, int frame_ix)
: isolate_(isolate), array_(array), frame_ix_(frame_ix) {}
@@ -914,8 +695,7 @@ StackFrameBase* FrameArrayIterator::Frame() {
namespace {
MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
- Handle<FrameArray> frame_array,
- int frame_index) {
+ Handle<StackTraceFrame> frame) {
Handle<JSFunction> target =
handle(isolate->native_context()->callsite_function(), isolate);
@@ -924,6 +704,14 @@ MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
isolate, obj,
JSObject::New(target, target, Handle<AllocationSite>::null()), Object);
+ // TODO(szuend): Introduce a new symbol "call_site_frame_symbol" and set
+ // it to the StackTraceFrame. The CallSite API builtins can then
+ // be implemented using StackFrameInfo objects.
+
+ Handle<FrameArray> frame_array(FrameArray::cast(frame->frame_array()),
+ isolate);
+ int frame_index = frame->frame_index();
+
Handle<Symbol> key = isolate->factory()->call_site_frame_array_symbol();
RETURN_ON_EXCEPTION(isolate,
JSObject::SetOwnPropertyIgnoreAttributes(
@@ -943,14 +731,16 @@ MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
// Convert the raw frames as written by Isolate::CaptureSimpleStackTrace into
// a JSArray of JSCallSite objects.
MaybeHandle<JSArray> GetStackFrames(Isolate* isolate,
- Handle<FrameArray> elems) {
- const int frame_count = elems->FrameCount();
+ Handle<FixedArray> elems) {
+ const int frame_count = elems->length();
Handle<FixedArray> frames = isolate->factory()->NewFixedArray(frame_count);
for (int i = 0; i < frame_count; i++) {
Handle<Object> site;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, site,
- ConstructCallSite(isolate, elems, i), JSArray);
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)),
+ isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, site, ConstructCallSite(isolate, frame),
+ JSArray);
frames->set(i, *site);
}
@@ -1013,13 +803,14 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<JSObject> error,
Handle<Object> raw_stack) {
DCHECK(raw_stack->IsFixedArray());
- Handle<FrameArray> elems = Handle<FrameArray>::cast(raw_stack);
+ Handle<FixedArray> elems = Handle<FixedArray>::cast(raw_stack);
const bool in_recursion = isolate->formatting_stack_trace();
if (!in_recursion) {
+ Handle<Context> error_context = error->GetCreationContext();
+ DCHECK(error_context->IsNativeContext());
+
if (isolate->HasPrepareStackTraceCallback()) {
- Handle<Context> error_context = error->GetCreationContext();
- DCHECK(!error_context.is_null() && error_context->IsNativeContext());
PrepareStackTraceScope scope(isolate);
Handle<JSArray> sites;
@@ -1033,7 +824,8 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Object);
return result;
} else {
- Handle<JSFunction> global_error = isolate->error_function();
+ Handle<JSFunction> global_error =
+ handle(error_context->error_function(), isolate);
// If there's a user-specified "prepareStackTrace" function, call it on
// the frames and use its result.
@@ -1080,11 +872,13 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
wasm::WasmCodeRefScope wasm_code_ref_scope;
- for (FrameArrayIterator it(isolate, elems); it.HasFrame(); it.Advance()) {
+ for (int i = 0; i < elems->length(); ++i) {
builder.AppendCString("\n at ");
- StackFrameBase* frame = it.Frame();
- frame->ToString(builder);
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)),
+ isolate);
+ SerializeStackTraceFrame(isolate, frame, builder);
+
if (isolate->has_pending_exception()) {
// CallSite.toString threw. Parts of the current frame might have been
// stringified already regardless. Still, try to append a string
@@ -1140,7 +934,7 @@ const char* MessageFormatter::TemplateString(MessageTemplate index) {
return STRING;
MESSAGE_TEMPLATES(CASE)
#undef CASE
- case MessageTemplate::kLastMessage:
+ case MessageTemplate::kMessageCount:
default:
return nullptr;
}
@@ -1183,7 +977,7 @@ MaybeHandle<String> MessageFormatter::Format(Isolate* isolate,
MaybeHandle<Object> ErrorUtils::Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
- bool suppress_detailed_trace) {
+ StackTraceCollection stack_trace_collection) {
// 1. If NewTarget is undefined, let newTarget be the active function object,
// else let newTarget be NewTarget.
@@ -1217,17 +1011,19 @@ MaybeHandle<Object> ErrorUtils::Construct(
Object);
}
- // Optionally capture a more detailed stack trace for the message.
- if (!suppress_detailed_trace) {
- RETURN_ON_EXCEPTION(isolate, isolate->CaptureAndSetDetailedStackTrace(err),
- Object);
+ switch (stack_trace_collection) {
+ case StackTraceCollection::kDetailed:
+ RETURN_ON_EXCEPTION(
+ isolate, isolate->CaptureAndSetDetailedStackTrace(err), Object);
+ V8_FALLTHROUGH;
+ case StackTraceCollection::kSimple:
+ RETURN_ON_EXCEPTION(
+ isolate, isolate->CaptureAndSetSimpleStackTrace(err, mode, caller),
+ Object);
+ break;
+ case StackTraceCollection::kNone:
+ break;
}
-
- // Capture a simple stack trace for the stack property.
- RETURN_ON_EXCEPTION(isolate,
- isolate->CaptureAndSetSimpleStackTrace(err, mode, caller),
- Object);
-
return err;
}
@@ -1356,7 +1152,7 @@ MaybeHandle<Object> ErrorUtils::MakeGenericError(
Handle<Object> no_caller;
return ErrorUtils::Construct(isolate, constructor, constructor, msg, mode,
- no_caller, false);
+ no_caller, StackTraceCollection::kDetailed);
}
} // namespace internal
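
The ErrorUtils::Construct change replaces the suppress_detailed_trace bool with the three-valued StackTraceCollection enum, and the kDetailed case intentionally falls through (V8_FALLTHROUGH) so a detailed capture still records the simple trace behind the error's stack property. A small C++17 sketch of just that control flow, with puts calls standing in for the real capture helpers:

#include <cstdio>

enum class StackTraceCollection { kDetailed, kSimple, kNone };

// kDetailed deliberately falls through so a detailed request also captures the
// simple trace backing the error's .stack property.
void CaptureStackTraces(StackTraceCollection collection) {
  switch (collection) {
    case StackTraceCollection::kDetailed:
      std::puts("capture detailed stack trace");
      [[fallthrough]];  // the real code spells this V8_FALLTHROUGH
    case StackTraceCollection::kSimple:
      std::puts("capture simple stack trace");
      break;
    case StackTraceCollection::kNone:
      break;  // e.g. deserialized errors need no stack information
  }
}

int main() {
  CaptureStackTraces(StackTraceCollection::kDetailed);  // prints both lines
  CaptureStackTraces(StackTraceCollection::kNone);      // prints nothing
  return 0;
}
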
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index 0fc3692f64..23f32c2fe1 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -12,7 +12,7 @@
#include <memory>
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/handles/handles.h"
namespace v8 {
@@ -24,7 +24,6 @@ class WasmCode;
// Forward declarations.
class AbstractCode;
class FrameArray;
-class IncrementalStringBuilder;
class JSMessageObject;
class LookupIterator;
class SharedFunctionInfo;
@@ -94,9 +93,6 @@ class StackFrameBase {
virtual bool IsConstructor() = 0;
virtual bool IsStrict() const = 0;
- MaybeHandle<String> ToString();
- virtual void ToString(IncrementalStringBuilder& builder) = 0;
-
// Used to signal that the requested field is unknown.
static const int kNone = -1;
@@ -139,8 +135,6 @@ class JSStackFrame : public StackFrameBase {
bool IsConstructor() override { return is_constructor_; }
bool IsStrict() const override { return is_strict_; }
- void ToString(IncrementalStringBuilder& builder) override;
-
private:
JSStackFrame() = default;
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
@@ -177,7 +171,7 @@ class WasmStackFrame : public StackFrameBase {
int GetPosition() const override;
int GetLineNumber() override { return wasm_func_index_; }
- int GetColumnNumber() override { return kNone; }
+ int GetColumnNumber() override;
int GetPromiseIndex() const override { return kNone; }
@@ -189,8 +183,6 @@ class WasmStackFrame : public StackFrameBase {
bool IsStrict() const override { return false; }
bool IsInterpreted() const { return code_ == nullptr; }
- void ToString(IncrementalStringBuilder& builder) override;
-
protected:
Handle<Object> Null() const;
@@ -203,6 +195,8 @@ class WasmStackFrame : public StackFrameBase {
int offset_;
private:
+ int GetModuleOffset() const;
+
WasmStackFrame() = default;
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
@@ -224,8 +218,6 @@ class AsmJsWasmStackFrame : public WasmStackFrame {
int GetLineNumber() override;
int GetColumnNumber() override;
- void ToString(IncrementalStringBuilder& builder) override;
-
private:
friend class FrameArrayIterator;
AsmJsWasmStackFrame() = default;
@@ -267,10 +259,13 @@ enum FrameSkipMode {
class ErrorUtils : public AllStatic {
public:
+ // |kNone| is useful when you don't need the stack information at all, for
+ // example when creating a deserialized error.
+ enum class StackTraceCollection { kDetailed, kSimple, kNone };
static MaybeHandle<Object> Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
- bool suppress_detailed_trace);
+ StackTraceCollection stack_trace_collection);
static MaybeHandle<String> ToString(Isolate* isolate, Handle<Object> recv);
diff --git a/deps/v8/src/execution/microtask-queue.cc b/deps/v8/src/execution/microtask-queue.cc
index 8088935154..3cc95205fa 100644
--- a/deps/v8/src/execution/microtask-queue.cc
+++ b/deps/v8/src/execution/microtask-queue.cc
@@ -253,7 +253,7 @@ void MicrotaskQueue::OnCompleted(Isolate* isolate) {
// set is still open (whether to clear it after every microtask or once
// during a microtask checkpoint). See also
// https://github.com/tc39/proposal-weakrefs/issues/39 .
- isolate->heap()->ClearKeepDuringJobSet();
+ isolate->heap()->ClearKeptObjects();
FireMicrotasksCompletedCallback(isolate);
}
diff --git a/deps/v8/src/execution/mips/frame-constants-mips.cc b/deps/v8/src/execution/mips/frame-constants-mips.cc
index 95d6eb951c..4c930e71a9 100644
--- a/deps/v8/src/execution/mips/frame-constants-mips.cc
+++ b/deps/v8/src/execution/mips/frame-constants-mips.cc
@@ -4,12 +4,11 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/codegen/assembler.h"
+#include "src/execution/mips/frame-constants-mips.h"
+
#include "src/codegen/mips/assembler-mips-inl.h"
-#include "src/codegen/mips/assembler-mips.h"
#include "src/execution/frame-constants.h"
-
-#include "src/execution/mips/frame-constants-mips.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/mips/simulator-mips.cc b/deps/v8/src/execution/mips/simulator-mips.cc
index e0448f232a..6a3a160ec3 100644
--- a/deps/v8/src/execution/mips/simulator-mips.cc
+++ b/deps/v8/src/execution/mips/simulator-mips.cc
@@ -1356,8 +1356,8 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
return ret;
}
-void Simulator::round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs) {
+void Simulator::round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1373,32 +1373,32 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_fcsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
-void Simulator::round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs) {
+void Simulator::round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1414,33 +1414,33 @@ void Simulator::round_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_fcsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
template <typename T_fp, typename T_int>
-void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
- T_int& rounded_int) {
+void Simulator::round_according_to_msacsr(T_fp toRound, T_fp* rounded,
+ T_int* rounded_int) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1456,32 +1456,32 @@ void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_msacsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(toRound + 0.5);
- rounded_int = static_cast<T_int>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - toRound == 0.5) {
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<T_int>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1;
+ *rounded_int -= 1;
+ *rounded -= 1;
}
break;
case kRoundToZero:
- rounded = trunc(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs) {
+void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1497,32 +1497,32 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs) {
+void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1538,26 +1538,26 @@ void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
@@ -2512,18 +2512,18 @@ float FPAbs<float>(float a) {
}
template <typename T>
-static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
if (std::isnan(a) && std::isnan(b)) {
- result = a;
+ *result = a;
} else if (std::isnan(a)) {
- result = b;
+ *result = b;
} else if (std::isnan(b)) {
- result = a;
+ *result = a;
} else if (b == a) {
// Handle -0.0 == 0.0 case.
// std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
// negates the result.
- result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
} else {
return false;
}
@@ -2533,7 +2533,7 @@ static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
template <typename T>
static T FPUMin(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
return result;
} else {
return b < a ? b : a;
@@ -2543,7 +2543,7 @@ static T FPUMin(T a, T b) {
template <typename T>
static T FPUMax(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
return result;
} else {
return b > a ? b : a;
@@ -2553,7 +2553,7 @@ static T FPUMax(T a, T b) {
template <typename T>
static T FPUMinA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) < FPAbs(b)) {
result = a;
} else if (FPAbs(b) < FPAbs(a)) {
@@ -2568,7 +2568,7 @@ static T FPUMinA(T a, T b) {
template <typename T>
static T FPUMaxA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) > FPAbs(b)) {
result = a;
} else if (FPAbs(b) > FPAbs(a)) {
@@ -2822,7 +2822,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case CVT_W_D: { // Convert double to word.
double rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -2876,7 +2876,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
int64_t result;
double rounded;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -3489,7 +3489,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
int64_t result;
float rounded;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -3502,7 +3502,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case CVT_W_S: {
float rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -5271,128 +5271,128 @@ void Simulator::DecodeTypeMsa3R() {
}
template <typename T_int, typename T_fp, typename T_reg>
-void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
const T_int all_ones = static_cast<T_int>(-1);
const T_fp s_element = *reinterpret_cast<T_fp*>(&ws);
const T_fp t_element = *reinterpret_cast<T_fp*>(&wt);
switch (opcode) {
case FCUN: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCEQ: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUEQ: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLT: {
if (s_element >= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULT: {
if (s_element < t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLE: {
if (s_element > t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULE: {
if (s_element <= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCOR: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUNE: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCNE: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FADD:
- wd = bit_cast<T_int>(s_element + t_element);
+ *wd = bit_cast<T_int>(s_element + t_element);
break;
case FSUB:
- wd = bit_cast<T_int>(s_element - t_element);
+ *wd = bit_cast<T_int>(s_element - t_element);
break;
case FMUL:
- wd = bit_cast<T_int>(s_element * t_element);
+ *wd = bit_cast<T_int>(s_element * t_element);
break;
case FDIV: {
if (t_element == 0) {
- wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- wd = bit_cast<T_int>(s_element / t_element);
+ *wd = bit_cast<T_int>(s_element / t_element);
}
} break;
case FMADD:
- wd = bit_cast<T_int>(
- std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FMSUB:
- wd = bit_cast<T_int>(
- std::fma(s_element, -t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(s_element, -t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FEXP2:
- wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
+ *wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
break;
case FMIN:
- wd = bit_cast<T_int>(std::min(s_element, t_element));
+ *wd = bit_cast<T_int>(std::min(s_element, t_element));
break;
case FMAX:
- wd = bit_cast<T_int>(std::max(s_element, t_element));
+ *wd = bit_cast<T_int>(std::max(s_element, t_element));
break;
case FMIN_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) < std::fabs(t_element) ? s_element : t_element);
} break;
case FMAX_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) > std::fabs(t_element) ? s_element : t_element);
} break;
case FSOR:
@@ -5414,7 +5414,7 @@ void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
}
template <typename T_int, typename T_int_dbl, typename T_reg>
-void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
// using T_uint = typename std::make_unsigned<T_int>::type;
using T_uint_dbl = typename std::make_unsigned<T_int_dbl>::type;
const T_int max_int = std::numeric_limits<T_int>::max();
@@ -5432,16 +5432,16 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
if (product == min_fix_dbl) {
product = max_fix_dbl;
}
- wd = static_cast<T_int>(product >> shift);
+ *wd = static_cast<T_int>(product >> shift);
} break;
case MADD_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUB_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MULR_Q: {
@@ -5449,23 +5449,23 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
bit_cast<T_uint_dbl>(std::numeric_limits<T_int_dbl>::min()) >> 1U;
const T_int_dbl max_fix_dbl = std::numeric_limits<T_int_dbl>::max() >> 1U;
if (product == min_fix_dbl) {
- wd = static_cast<T_int>(max_fix_dbl >> shift);
+ *wd = static_cast<T_int>(max_fix_dbl >> shift);
break;
}
- wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
+ *wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
} break;
case MADDR_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUBR_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
default:
@@ -5588,19 +5588,19 @@ void Simulator::DecodeTypeMsa3RF() {
#undef PACK_FLOAT16
#undef FEXDO_DF
case FTQ:
-#define FTQ_DF(source, dst, fp_type, int_type) \
- element = bit_cast<fp_type>(source) * \
- (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
- if (element > std::numeric_limits<int_type>::max()) { \
- dst = std::numeric_limits<int_type>::max(); \
- } else if (element < std::numeric_limits<int_type>::min()) { \
- dst = std::numeric_limits<int_type>::min(); \
- } else if (std::isnan(element)) { \
- dst = 0; \
- } else { \
- int_type fixed_point; \
- round_according_to_msacsr(element, element, fixed_point); \
- dst = fixed_point; \
+#define FTQ_DF(source, dst, fp_type, int_type) \
+ element = bit_cast<fp_type>(source) * \
+ (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
+ if (element > std::numeric_limits<int_type>::max()) { \
+ dst = std::numeric_limits<int_type>::max(); \
+ } else if (element < std::numeric_limits<int_type>::min()) { \
+ dst = std::numeric_limits<int_type>::min(); \
+ } else if (std::isnan(element)) { \
+ dst = 0; \
+ } else { \
+ int_type fixed_point; \
+ round_according_to_msacsr(element, &element, &fixed_point); \
+ dst = fixed_point; \
}
switch (DecodeMsaDataFormat()) {
@@ -5623,13 +5623,13 @@ void Simulator::DecodeTypeMsa3RF() {
}
break;
#undef FTQ_DF
-#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, &(wd)); \
}
-#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, &(wd)); \
}
case MADD_Q:
case MSUB_Q:
@@ -5859,7 +5859,7 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
#undef QUIET_BIT_D
template <typename T_int, typename T_fp, typename T_src, typename T_dst>
-T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
+T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
Simulator* sim) {
using T_uint = typename std::make_unsigned<T_int>::type;
switch (opcode) {
@@ -5878,37 +5878,37 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_INFINITE:
if (std::signbit(element)) {
- dst = NEG_INFINITY_BIT;
+ *dst = NEG_INFINITY_BIT;
} else {
- dst = POS_INFINITY_BIT;
+ *dst = POS_INFINITY_BIT;
}
break;
case FP_NAN:
if (isSnan(element)) {
- dst = SNAN_BIT;
+ *dst = SNAN_BIT;
} else {
- dst = QNAN_BIT;
+ *dst = QNAN_BIT;
}
break;
case FP_NORMAL:
if (std::signbit(element)) {
- dst = NEG_NORMAL_BIT;
+ *dst = NEG_NORMAL_BIT;
} else {
- dst = POS_NORMAL_BIT;
+ *dst = POS_NORMAL_BIT;
}
break;
case FP_SUBNORMAL:
if (std::signbit(element)) {
- dst = NEG_SUBNORMAL_BIT;
+ *dst = NEG_SUBNORMAL_BIT;
} else {
- dst = POS_SUBNORMAL_BIT;
+ *dst = POS_SUBNORMAL_BIT;
}
break;
case FP_ZERO:
if (std::signbit(element)) {
- dst = NEG_ZERO_BIT;
+ *dst = NEG_ZERO_BIT;
} else {
- dst = POS_ZERO_BIT;
+ *dst = POS_ZERO_BIT;
}
break;
default:
@@ -5932,11 +5932,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= min_int) {
- dst = element >= max_int ? max_int : min_int;
+ *dst = element >= max_int ? max_int : min_int;
} else {
- dst = static_cast<T_int>(std::trunc(element));
+ *dst = static_cast<T_int>(std::trunc(element));
}
break;
}
@@ -5944,49 +5944,49 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_int = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= 0) {
- dst = element >= max_int ? max_int : 0;
+ *dst = element >= max_int ? max_int : 0;
} else {
- dst = static_cast<T_uint>(std::trunc(element));
+ *dst = static_cast<T_uint>(std::trunc(element));
}
break;
}
case FSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::sqrt(element));
+ *dst = bit_cast<T_int>(std::sqrt(element));
}
break;
}
case FRSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / std::sqrt(element));
+ *dst = bit_cast<T_int>(1 / std::sqrt(element));
}
break;
}
case FRCP: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / element);
+ *dst = bit_cast<T_int>(1 / element);
}
break;
}
case FRINT: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
T_int dummy;
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dummy);
- dst = bit_cast<T_int>(element);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, &dummy);
+ *dst = bit_cast<T_int>(element);
}
break;
}
@@ -5995,19 +5995,19 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_NORMAL:
case FP_SUBNORMAL:
- dst = bit_cast<T_int>(std::logb(element));
+ *dst = bit_cast<T_int>(std::logb(element));
break;
case FP_ZERO:
- dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
break;
case FP_NAN:
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
break;
case FP_INFINITE:
if (element < 0) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
}
break;
default:
@@ -6020,11 +6020,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < min_int || element > max_int) {
- dst = element > max_int ? max_int : min_int;
+ *dst = element > max_int ? max_int : min_int;
} else {
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dst);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, dst);
}
break;
}
@@ -6032,22 +6032,22 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_uint = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < 0 || element > max_uint) {
- dst = element > max_uint ? max_uint : 0;
+ *dst = element > max_uint ? max_uint : 0;
} else {
T_uint res;
- sim->round_according_to_msacsr<T_fp, T_uint>(element, element, res);
- dst = *reinterpret_cast<T_int*>(&res);
+ sim->round_according_to_msacsr<T_fp, T_uint>(element, &element, &res);
+ *dst = *reinterpret_cast<T_int*>(&res);
}
break;
}
case FFINT_S:
- dst = bit_cast<T_int>(static_cast<T_fp>(src));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(src));
break;
case FFINT_U:
using uT_src = typename std::make_unsigned<T_src>::type;
- dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
break;
default:
UNREACHABLE();
@@ -6157,12 +6157,12 @@ void Simulator::DecodeTypeMsa2RF() {
switch (DecodeMsaDataFormat()) {
case MSA_WORD:
for (int i = 0; i < kMSALanesWord; i++) {
- Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], wd.w[i], this);
+ Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], &wd.w[i], this);
}
break;
case MSA_DWORD:
for (int i = 0; i < kMSALanesDword; i++) {
- Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], wd.d[i], this);
+ Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], &wd.d[i], this);
}
break;
default:
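
Most of this file's churn is a mechanical conversion of output reference parameters to output pointers; the rounding behaviour itself is untouched. For reference, a self-contained sketch of the ties-to-even adjustment the kRoundToNearest branch performs after the conversion (the standalone helper name is ours):

    #include <cmath>
    #include <cstdint>

    // Round half-up first, then back off by one when the input was exactly
    // halfway between two integers and the half-up result is odd, yielding
    // round-to-nearest-ties-to-even as in round_according_to_fcsr.
    static void RoundNearestEven(double fs, double* rounded,
                                 int32_t* rounded_int) {
      *rounded = std::floor(fs + 0.5);
      *rounded_int = static_cast<int32_t>(*rounded);
      if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
        *rounded_int -= 1;
        *rounded -= 1.;
      }
    }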
diff --git a/deps/v8/src/execution/mips/simulator-mips.h b/deps/v8/src/execution/mips/simulator-mips.h
index b5712d1a82..28e38fd0a5 100644
--- a/deps/v8/src/execution/mips/simulator-mips.h
+++ b/deps/v8/src/execution/mips/simulator-mips.h
@@ -258,16 +258,16 @@ class Simulator : public SimulatorBase {
bool set_fcsr_round_error(float original, float rounded);
bool set_fcsr_round64_error(double original, double rounded);
bool set_fcsr_round64_error(float original, float rounded);
- void round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs);
- void round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs);
+ void round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs);
+ void round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs);
template <typename Tfp, typename Tint>
- void round_according_to_msacsr(Tfp toRound, Tfp& rounded, Tint& rounded_int);
- void round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs);
- void round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs);
+ void round_according_to_msacsr(Tfp toRound, Tfp* rounded, Tint* rounded_int);
+ void round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs);
+ void round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
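
With the declarations above switched to pointer outputs, mutation is visible at every call site (&rounded, &rounded_int). A minimal runnable illustration of that convention, using an invented RoundToZero helper rather than the simulator itself:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Invented helper with the same pointer-output shape as the declarations
    // above; the call site spells out the outputs with '&'.
    static void RoundToZero(double fs, double* rounded, int32_t* rounded_int) {
      *rounded = std::trunc(fs);
      *rounded_int = static_cast<int32_t>(*rounded);
    }

    int main() {
      double rounded;
      int32_t rounded_int;
      RoundToZero(2.75, &rounded, &rounded_int);
      std::printf("%f %d\n", rounded, rounded_int);  // prints "2.000000 2"
      return 0;
    }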
diff --git a/deps/v8/src/execution/mips64/frame-constants-mips64.cc b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
index 68398605ba..97ef183592 100644
--- a/deps/v8/src/execution/mips64/frame-constants-mips64.cc
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
@@ -4,10 +4,9 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/codegen/assembler.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
-#include "src/codegen/mips64/assembler-mips64.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
#include "src/execution/mips64/frame-constants-mips64.h"
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index 7c45e7f82d..3fbf1961a8 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -1285,8 +1285,8 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
}
// For cvt instructions only
-void Simulator::round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs) {
+void Simulator::round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1302,32 +1302,32 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs) {
+void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1343,33 +1343,33 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
// for cvt instructions only
-void Simulator::round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs) {
+void Simulator::round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1385,32 +1385,32 @@ void Simulator::round_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs) {
+void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1426,33 +1426,33 @@ void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
template <typename T_fp, typename T_int>
-void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
- T_int& rounded_int) {
+void Simulator::round_according_to_msacsr(T_fp toRound, T_fp* rounded,
+ T_int* rounded_int) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1468,26 +1468,26 @@ void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_msacsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(toRound + 0.5);
- rounded_int = static_cast<T_int>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - toRound == 0.5) {
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<T_int>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
}
}
@@ -2507,18 +2507,18 @@ float FPAbs<float>(float a) {
}
template <typename T>
-static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
if (std::isnan(a) && std::isnan(b)) {
- result = a;
+ *result = a;
} else if (std::isnan(a)) {
- result = b;
+ *result = b;
} else if (std::isnan(b)) {
- result = a;
+ *result = a;
} else if (b == a) {
// Handle -0.0 == 0.0 case.
// std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
// negates the result.
- result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
} else {
return false;
}
@@ -2528,7 +2528,7 @@ static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
template <typename T>
static T FPUMin(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
return result;
} else {
return b < a ? b : a;
@@ -2538,7 +2538,7 @@ static T FPUMin(T a, T b) {
template <typename T>
static T FPUMax(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
return result;
} else {
return b > a ? b : a;
@@ -2548,7 +2548,7 @@ static T FPUMax(T a, T b) {
template <typename T>
static T FPUMinA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) < FPAbs(b)) {
result = a;
} else if (FPAbs(b) < FPAbs(a)) {
@@ -2563,7 +2563,7 @@ static T FPUMinA(T a, T b) {
template <typename T>
static T FPUMaxA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) > FPAbs(b)) {
result = a;
} else if (FPAbs(b) > FPAbs(a)) {
@@ -2829,7 +2829,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case CVT_L_S: {
float rounded;
int64_t result;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -2839,7 +2839,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case CVT_W_S: {
float rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -3189,7 +3189,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case CVT_W_D: { // Convert double to word.
double rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -3243,7 +3243,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case CVT_L_D: { // Mips64r2: Truncate double to 64-bit long-word.
double rounded;
int64_t result;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -5544,128 +5544,128 @@ void Simulator::DecodeTypeMsa3R() {
}
template <typename T_int, typename T_fp, typename T_reg>
-void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
const T_int all_ones = static_cast<T_int>(-1);
const T_fp s_element = *reinterpret_cast<T_fp*>(&ws);
const T_fp t_element = *reinterpret_cast<T_fp*>(&wt);
switch (opcode) {
case FCUN: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCEQ: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUEQ: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLT: {
if (s_element >= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULT: {
if (s_element < t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLE: {
if (s_element > t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULE: {
if (s_element <= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCOR: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUNE: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCNE: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FADD:
- wd = bit_cast<T_int>(s_element + t_element);
+ *wd = bit_cast<T_int>(s_element + t_element);
break;
case FSUB:
- wd = bit_cast<T_int>(s_element - t_element);
+ *wd = bit_cast<T_int>(s_element - t_element);
break;
case FMUL:
- wd = bit_cast<T_int>(s_element * t_element);
+ *wd = bit_cast<T_int>(s_element * t_element);
break;
case FDIV: {
if (t_element == 0) {
- wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- wd = bit_cast<T_int>(s_element / t_element);
+ *wd = bit_cast<T_int>(s_element / t_element);
}
} break;
case FMADD:
- wd = bit_cast<T_int>(
- std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FMSUB:
- wd = bit_cast<T_int>(
- std::fma(-s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(-s_element, t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FEXP2:
- wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
+ *wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
break;
case FMIN:
- wd = bit_cast<T_int>(std::min(s_element, t_element));
+ *wd = bit_cast<T_int>(std::min(s_element, t_element));
break;
case FMAX:
- wd = bit_cast<T_int>(std::max(s_element, t_element));
+ *wd = bit_cast<T_int>(std::max(s_element, t_element));
break;
case FMIN_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) < std::fabs(t_element) ? s_element : t_element);
} break;
case FMAX_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) > std::fabs(t_element) ? s_element : t_element);
} break;
case FSOR:
@@ -5687,7 +5687,7 @@ void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
}
template <typename T_int, typename T_int_dbl, typename T_reg>
-void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
// using T_uint = typename std::make_unsigned<T_int>::type;
using T_uint_dbl = typename std::make_unsigned<T_int_dbl>::type;
const T_int max_int = std::numeric_limits<T_int>::max();
@@ -5705,16 +5705,16 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
if (product == min_fix_dbl) {
product = max_fix_dbl;
}
- wd = static_cast<T_int>(product >> shift);
+ *wd = static_cast<T_int>(product >> shift);
} break;
case MADD_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUB_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MULR_Q: {
@@ -5722,23 +5722,23 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
bit_cast<T_uint_dbl>(std::numeric_limits<T_int_dbl>::min()) >> 1U;
const T_int_dbl max_fix_dbl = std::numeric_limits<T_int_dbl>::max() >> 1U;
if (product == min_fix_dbl) {
- wd = static_cast<T_int>(max_fix_dbl >> shift);
+ *wd = static_cast<T_int>(max_fix_dbl >> shift);
break;
}
- wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
+ *wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
} break;
case MADDR_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUBR_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
default:
@@ -5861,19 +5861,19 @@ void Simulator::DecodeTypeMsa3RF() {
#undef PACK_FLOAT16
#undef FEXDO_DF
case FTQ:
-#define FTQ_DF(source, dst, fp_type, int_type) \
- element = bit_cast<fp_type>(source) * \
- (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
- if (element > std::numeric_limits<int_type>::max()) { \
- dst = std::numeric_limits<int_type>::max(); \
- } else if (element < std::numeric_limits<int_type>::min()) { \
- dst = std::numeric_limits<int_type>::min(); \
- } else if (std::isnan(element)) { \
- dst = 0; \
- } else { \
- int_type fixed_point; \
- round_according_to_msacsr(element, element, fixed_point); \
- dst = fixed_point; \
+#define FTQ_DF(source, dst, fp_type, int_type) \
+ element = bit_cast<fp_type>(source) * \
+ (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
+ if (element > std::numeric_limits<int_type>::max()) { \
+ dst = std::numeric_limits<int_type>::max(); \
+ } else if (element < std::numeric_limits<int_type>::min()) { \
+ dst = std::numeric_limits<int_type>::min(); \
+ } else if (std::isnan(element)) { \
+ dst = 0; \
+ } else { \
+ int_type fixed_point; \
+ round_according_to_msacsr(element, &element, &fixed_point); \
+ dst = fixed_point; \
}
switch (DecodeMsaDataFormat()) {
@@ -5896,13 +5896,13 @@ void Simulator::DecodeTypeMsa3RF() {
}
break;
#undef FTQ_DF
-#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, &(wd)); \
}
-#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, &(wd)); \
}
case MADD_Q:
case MSUB_Q:
@@ -6139,7 +6139,7 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
#undef QUIET_BIT_D
template <typename T_int, typename T_fp, typename T_src, typename T_dst>
-T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
+T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
Simulator* sim) {
using T_uint = typename std::make_unsigned<T_int>::type;
switch (opcode) {
@@ -6158,37 +6158,37 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_INFINITE:
if (std::signbit(element)) {
- dst = NEG_INFINITY_BIT;
+ *dst = NEG_INFINITY_BIT;
} else {
- dst = POS_INFINITY_BIT;
+ *dst = POS_INFINITY_BIT;
}
break;
case FP_NAN:
if (isSnan(element)) {
- dst = SNAN_BIT;
+ *dst = SNAN_BIT;
} else {
- dst = QNAN_BIT;
+ *dst = QNAN_BIT;
}
break;
case FP_NORMAL:
if (std::signbit(element)) {
- dst = NEG_NORMAL_BIT;
+ *dst = NEG_NORMAL_BIT;
} else {
- dst = POS_NORMAL_BIT;
+ *dst = POS_NORMAL_BIT;
}
break;
case FP_SUBNORMAL:
if (std::signbit(element)) {
- dst = NEG_SUBNORMAL_BIT;
+ *dst = NEG_SUBNORMAL_BIT;
} else {
- dst = POS_SUBNORMAL_BIT;
+ *dst = POS_SUBNORMAL_BIT;
}
break;
case FP_ZERO:
if (std::signbit(element)) {
- dst = NEG_ZERO_BIT;
+ *dst = NEG_ZERO_BIT;
} else {
- dst = POS_ZERO_BIT;
+ *dst = POS_ZERO_BIT;
}
break;
default:
@@ -6212,11 +6212,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= min_int) {
- dst = element >= max_int ? max_int : min_int;
+ *dst = element >= max_int ? max_int : min_int;
} else {
- dst = static_cast<T_int>(std::trunc(element));
+ *dst = static_cast<T_int>(std::trunc(element));
}
break;
}
@@ -6224,49 +6224,49 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_int = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= 0) {
- dst = element >= max_int ? max_int : 0;
+ *dst = element >= max_int ? max_int : 0;
} else {
- dst = static_cast<T_uint>(std::trunc(element));
+ *dst = static_cast<T_uint>(std::trunc(element));
}
break;
}
case FSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::sqrt(element));
+ *dst = bit_cast<T_int>(std::sqrt(element));
}
break;
}
case FRSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / std::sqrt(element));
+ *dst = bit_cast<T_int>(1 / std::sqrt(element));
}
break;
}
case FRCP: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / element);
+ *dst = bit_cast<T_int>(1 / element);
}
break;
}
case FRINT: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
T_int dummy;
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dummy);
- dst = bit_cast<T_int>(element);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, &dummy);
+ *dst = bit_cast<T_int>(element);
}
break;
}
@@ -6275,19 +6275,19 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_NORMAL:
case FP_SUBNORMAL:
- dst = bit_cast<T_int>(std::logb(element));
+ *dst = bit_cast<T_int>(std::logb(element));
break;
case FP_ZERO:
- dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
break;
case FP_NAN:
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
break;
case FP_INFINITE:
if (element < 0) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
}
break;
default:
@@ -6300,11 +6300,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < min_int || element > max_int) {
- dst = element > max_int ? max_int : min_int;
+ *dst = element > max_int ? max_int : min_int;
} else {
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dst);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, dst);
}
break;
}
@@ -6312,22 +6312,22 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_uint = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < 0 || element > max_uint) {
- dst = element > max_uint ? max_uint : 0;
+ *dst = element > max_uint ? max_uint : 0;
} else {
T_uint res;
- sim->round_according_to_msacsr<T_fp, T_uint>(element, element, res);
- dst = *reinterpret_cast<T_int*>(&res);
+ sim->round_according_to_msacsr<T_fp, T_uint>(element, &element, &res);
+ *dst = *reinterpret_cast<T_int*>(&res);
}
break;
}
case FFINT_S:
- dst = bit_cast<T_int>(static_cast<T_fp>(src));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(src));
break;
case FFINT_U:
using uT_src = typename std::make_unsigned<T_src>::type;
- dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
break;
default:
UNREACHABLE();
@@ -6437,12 +6437,12 @@ void Simulator::DecodeTypeMsa2RF() {
switch (DecodeMsaDataFormat()) {
case MSA_WORD:
for (int i = 0; i < kMSALanesWord; i++) {
- Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], wd.w[i], this);
+ Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], &wd.w[i], this);
}
break;
case MSA_DWORD:
for (int i = 0; i < kMSALanesDword; i++) {
- Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], wd.d[i], this);
+ Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], &wd.d[i], this);
}
break;
default:
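
The MSA helpers change in the same way; in Msa3RFInstrHelper the floating-point compares now write an all-ones mask or zero into the destination lane through the pointer. A standalone sketch of that per-lane pattern for FCEQ (the other compares differ only in the predicate):

    #include <cmath>
    #include <cstdint>

    // Per-lane MSA compare result: all ones when the predicate holds (and
    // neither operand is NaN), zero otherwise; written through the pointer as
    // Msa3RFInstrHelper now does.
    static void FceqLane(float s, float t, int32_t* wd) {
      const int32_t all_ones = static_cast<int32_t>(-1);
      *wd = (!std::isnan(s) && !std::isnan(t) && s == t) ? all_ones : 0;
    }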
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.h b/deps/v8/src/execution/mips64/simulator-mips64.h
index d1251f5f0e..2bfcbe9d98 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.h
+++ b/deps/v8/src/execution/mips64/simulator-mips64.h
@@ -255,17 +255,17 @@ class Simulator : public SimulatorBase {
bool set_fcsr_round64_error(double original, double rounded);
bool set_fcsr_round_error(float original, float rounded);
bool set_fcsr_round64_error(float original, float rounded);
- void round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs);
- void round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs);
- void round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs);
- void round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs);
+ void round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs);
+ void round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs);
+ void round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs);
+ void round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs);
template <typename T_fp, typename T_int>
- void round_according_to_msacsr(T_fp toRound, T_fp& rounded,
- T_int& rounded_int);
+ void round_according_to_msacsr(T_fp toRound, T_fp* rounded,
+ T_int* rounded_int);
void set_fcsr_rounding_mode(FPURoundingMode mode);
void set_msacsr_rounding_mode(FPURoundingMode mode);
unsigned int get_fcsr_rounding_mode();
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index 6cd4daa33c..96308f7f5b 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -342,7 +342,7 @@ void PPCDebugger::Debug() {
Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
- obj->Print(os);
+ obj.Print(os);
os << "\n";
#else
os << Brief(obj) << "\n";
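
The obj->Print(os) to obj.Print(os) change in both debuggers follows V8's move to value-typed Object handles (a thin wrapper over the tagged word instead of a raw pointer). A toy illustration of why the arrow becomes a dot, with an invented wrapper type:

    #include <cstdint>
    #include <iostream>

    // Invented stand-in for the value-typed Object: members live on the
    // wrapper itself, so call sites use '.' instead of '->'.
    class ToyObject {
     public:
      explicit ToyObject(uintptr_t tagged) : tagged_(tagged) {}
      void Print(std::ostream& os) const { os << std::hex << tagged_ << "\n"; }

     private:
      uintptr_t tagged_;
    };

    int main() {
      ToyObject obj(0xdeadbeef);
      obj.Print(std::cout);  // previously obj->Print(os) on a raw pointer
      return 0;
    }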
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 8093497168..8a82e32243 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -372,7 +372,7 @@ void S390Debugger::Debug() {
Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
- obj->Print(os);
+ obj.Print(os);
os << "\n";
#else
os << Brief(obj) << "\n";
@@ -5149,27 +5149,6 @@ EVALUATE(STM) {
return length;
}
-EVALUATE(TM) {
- DCHECK_OPCODE(TM);
- // Test Under Mask (Mem - Imm) (8)
- DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val)
- int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
- intptr_t addr = b1_val + d1_val;
- uint8_t mem_val = ReadB(addr);
- uint8_t selected_bits = mem_val & imm_val;
- // CC0: Selected bits are zero
- // CC1: Selected bits mixed zeros and ones
- // CC3: Selected bits all ones
- if (0 == selected_bits) {
- condition_reg_ = CC_EQ; // CC0
- } else if (selected_bits == imm_val) {
- condition_reg_ = 0x1; // CC3
- } else {
- condition_reg_ = 0x4; // CC1
- }
- return length;
-}
-
EVALUATE(MVI) {
UNIMPLEMENTED();
USE(instr);
@@ -5595,7 +5574,8 @@ EVALUATE(LLILL) {
return 0;
}
-inline static int TestUnderMask(uint16_t val, uint16_t mask) {
+inline static int TestUnderMask(uint16_t val, uint16_t mask,
+ bool is_tm_or_tmy) {
// Test if all selected bits are zeros or mask is zero
if (0 == (mask & val)) {
return 0x8;
@@ -5607,6 +5587,13 @@ inline static int TestUnderMask(uint16_t val, uint16_t mask) {
}
// Now we know selected bits mixed zeros and ones
+ // Test if it is TM or TMY since they have
+ // different CC results from TMLL/TMLH/TMHH/TMHL
+ if (is_tm_or_tmy) {
+ return 0x4;
+ }
+
+ // Now we know the instruction is TMLL/TMLH/TMHH/TMHL
// Test if the leftmost bit is zero or one
#if defined(__GNUC__)
int leadingZeros = __builtin_clz(mask);
@@ -5639,7 +5626,8 @@ EVALUATE(TMLH) {
DECODE_RI_A_INSTRUCTION(instr, r1, i2);
uint32_t value = get_low_register<uint32_t>(r1) >> 16;
uint32_t mask = i2 & 0x0000FFFF;
- condition_reg_ = TestUnderMask(value, mask);
+ bool is_tm_or_tmy = false;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
return length; // DONE
}
@@ -5648,20 +5636,29 @@ EVALUATE(TMLL) {
DECODE_RI_A_INSTRUCTION(instr, r1, i2);
uint32_t value = get_low_register<uint32_t>(r1) & 0x0000FFFF;
uint32_t mask = i2 & 0x0000FFFF;
- condition_reg_ = TestUnderMask(value, mask);
+ bool is_tm_or_tmy = false;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
return length; // DONE
}
EVALUATE(TMHH) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(TMHH);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ uint32_t value = get_high_register<uint32_t>(r1) >> 16;
+ uint32_t mask = i2 & 0x0000FFFF;
+ bool is_tm_or_tmy = false;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
+ return length;
}
EVALUATE(TMHL) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(TMHL);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ uint32_t value = get_high_register<uint32_t>(r1) & 0x0000FFFF;
+ uint32_t mask = i2 & 0x0000FFFF;
+ bool is_tm_or_tmy = false;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
+ return length;
}
EVALUATE(BRAS) {
@@ -9972,26 +9969,31 @@ EVALUATE(ECAG) {
return 0;
}
+EVALUATE(TM) {
+ DCHECK_OPCODE(TM);
+ // Test Under Mask (Mem - Imm) (8)
+ DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val)
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ uint8_t selected_bits = mem_val & imm_val;
+ // is TM
+ bool is_tm_or_tmy = true;
+ condition_reg_ = TestUnderMask(selected_bits, imm_val, is_tm_or_tmy);
+ return length;
+}
+
EVALUATE(TMY) {
DCHECK_OPCODE(TMY);
// Test Under Mask (Mem - Imm) (8)
- DECODE_SIY_INSTRUCTION(b1, d1, i2);
+ DECODE_SIY_INSTRUCTION(b1, d1_val, imm_val);
int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
- intptr_t d1_val = d1;
intptr_t addr = b1_val + d1_val;
uint8_t mem_val = ReadB(addr);
- uint8_t imm_val = i2;
uint8_t selected_bits = mem_val & imm_val;
- // CC0: Selected bits are zero
- // CC1: Selected bits mixed zeros and ones
- // CC3: Selected bits all ones
- if (0 == selected_bits) {
- condition_reg_ = CC_EQ; // CC0
- } else if (selected_bits == imm_val) {
- condition_reg_ = 0x1; // CC3
- } else {
- condition_reg_ = 0x4; // CC1
- }
+ // is TMY
+ bool is_tm_or_tmy = true;
+ condition_reg_ = TestUnderMask(selected_bits, imm_val, is_tm_or_tmy);
return length;
}
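For reference, a minimal standalone sketch of the condition-code logic that the unified TestUnderMask helper now carries, assuming the CC bit encoding used by the removed TM code above (0x8 = CC0, 0x4 = CC1, 0x2 = CC2, 0x1 = CC3). The all-ones branch sits between the hunks and is reconstructed here from that removed code; the CC2/CC1 split on the leftmost bit and the sample values are assumptions for illustration. It uses the GCC/Clang __builtin_clz builtin, as the original does under __GNUC__.

#include <cstdint>
#include <cstdio>

// Sketch only: mirrors the visible branches of TestUnderMask after the patch.
static int TestUnderMaskSketch(uint16_t val, uint16_t mask, bool is_tm_or_tmy) {
  uint16_t selected = mask & val;
  if (selected == 0) return 0x8;      // all selected bits are zero -> CC0
  if (selected == mask) return 0x1;   // all selected bits are one  -> CC3 (reconstructed branch)
  if (is_tm_or_tmy) return 0x4;       // TM/TMY: mixed bits always yield CC1
  // TMLL/TMLH/TMHL/TMHH: the result depends on the leftmost selected bit.
  int leading_zeros = __builtin_clz(mask);
  uint32_t leftmost = 0x80000000u >> leading_zeros;
  return (val & leftmost) ? 0x2 : 0x4;  // assumed: CC2 if that bit is one, CC1 if zero
}

int main() {
  // Mixed selected bits with the leftmost masked bit set: TM/TMY still report
  // CC1 (0x4), while the register forms inspect the leftmost bit (0x2 here).
  std::printf("TM/TMY: %x  TMLL-style: %x\n",
              TestUnderMaskSketch(0x0090, 0x00f0, true),
              TestUnderMaskSketch(0x0090, 0x00f0, false));
  return 0;
}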
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
new file mode 100644
index 0000000000..e5c24cef1e
--- /dev/null
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -0,0 +1,345 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/stack-guard.h"
+
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/execution/interrupts-scope.h"
+#include "src/execution/isolate.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/execution/simulator.h"
+#include "src/logging/counters.h"
+#include "src/roots/roots-inl.h"
+#include "src/utils/memcopy.h"
+#include "src/wasm/wasm-engine.h"
+
+namespace v8 {
+namespace internal {
+
+void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
+ DCHECK_NOT_NULL(isolate_);
+ thread_local_.set_jslimit(kInterruptLimit);
+ thread_local_.set_climit(kInterruptLimit);
+ isolate_->heap()->SetStackLimits();
+}
+
+void StackGuard::reset_limits(const ExecutionAccess& lock) {
+ DCHECK_NOT_NULL(isolate_);
+ thread_local_.set_jslimit(thread_local_.real_jslimit_);
+ thread_local_.set_climit(thread_local_.real_climit_);
+ isolate_->heap()->SetStackLimits();
+}
+
+void StackGuard::SetStackLimit(uintptr_t limit) {
+ ExecutionAccess access(isolate_);
+ // If the current limits are special (e.g. due to a pending interrupt) then
+ // leave them alone.
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
+ if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
+ thread_local_.set_jslimit(jslimit);
+ }
+ if (thread_local_.climit() == thread_local_.real_climit_) {
+ thread_local_.set_climit(limit);
+ }
+ thread_local_.real_climit_ = limit;
+ thread_local_.real_jslimit_ = jslimit;
+}
+
+void StackGuard::AdjustStackLimitForSimulator() {
+ ExecutionAccess access(isolate_);
+ uintptr_t climit = thread_local_.real_climit_;
+ // If the current limits are special (e.g. due to a pending interrupt) then
+ // leave them alone.
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
+ if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
+ thread_local_.set_jslimit(jslimit);
+ isolate_->heap()->SetStackLimits();
+ }
+}
+
+void StackGuard::EnableInterrupts() {
+ ExecutionAccess access(isolate_);
+ if (has_pending_interrupts(access)) {
+ set_interrupt_limits(access);
+ }
+}
+
+void StackGuard::DisableInterrupts() {
+ ExecutionAccess access(isolate_);
+ reset_limits(access);
+}
+
+void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
+ ExecutionAccess access(isolate_);
+ DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
+ if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
+ // Intercept already requested interrupts.
+ int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
+ scope->intercepted_flags_ = intercepted;
+ thread_local_.interrupt_flags_ &= ~intercepted;
+ } else {
+ DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
+ // Restore postponed interrupts.
+ int restored_flags = 0;
+ for (InterruptsScope* current = thread_local_.interrupt_scopes_;
+ current != nullptr; current = current->prev_) {
+ restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
+ current->intercepted_flags_ &= ~scope->intercept_mask_;
+ }
+ thread_local_.interrupt_flags_ |= restored_flags;
+ }
+ if (!has_pending_interrupts(access)) reset_limits(access);
+ // Add scope to the chain.
+ scope->prev_ = thread_local_.interrupt_scopes_;
+ thread_local_.interrupt_scopes_ = scope;
+}
+
+void StackGuard::PopInterruptsScope() {
+ ExecutionAccess access(isolate_);
+ InterruptsScope* top = thread_local_.interrupt_scopes_;
+ DCHECK_NE(top->mode_, InterruptsScope::kNoop);
+ if (top->mode_ == InterruptsScope::kPostponeInterrupts) {
+ // Make intercepted interrupts active.
+ DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
+ thread_local_.interrupt_flags_ |= top->intercepted_flags_;
+ } else {
+ DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
+ // Postpone existing interrupts if needed.
+ if (top->prev_) {
+ for (int interrupt = 1; interrupt < ALL_INTERRUPTS;
+ interrupt = interrupt << 1) {
+ InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
+ if ((thread_local_.interrupt_flags_ & flag) &&
+ top->prev_->Intercept(flag)) {
+ thread_local_.interrupt_flags_ &= ~flag;
+ }
+ }
+ }
+ }
+ if (has_pending_interrupts(access)) set_interrupt_limits(access);
+ // Remove scope from chain.
+ thread_local_.interrupt_scopes_ = top->prev_;
+}
+
+bool StackGuard::CheckInterrupt(InterruptFlag flag) {
+ ExecutionAccess access(isolate_);
+ return thread_local_.interrupt_flags_ & flag;
+}
+
+void StackGuard::RequestInterrupt(InterruptFlag flag) {
+ ExecutionAccess access(isolate_);
+ // Check the chain of InterruptsScope for interception.
+ if (thread_local_.interrupt_scopes_ &&
+ thread_local_.interrupt_scopes_->Intercept(flag)) {
+ return;
+ }
+
+ // Not intercepted. Set as active interrupt flag.
+ thread_local_.interrupt_flags_ |= flag;
+ set_interrupt_limits(access);
+
+ // If this isolate is waiting in a futex, notify it to wake up.
+ isolate_->futex_wait_list_node()->NotifyWake();
+}
+
+void StackGuard::ClearInterrupt(InterruptFlag flag) {
+ ExecutionAccess access(isolate_);
+ // Clear the interrupt flag from the chain of InterruptsScope.
+ for (InterruptsScope* current = thread_local_.interrupt_scopes_;
+ current != nullptr; current = current->prev_) {
+ current->intercepted_flags_ &= ~flag;
+ }
+
+ // Clear the interrupt flag from the active interrupt flags.
+ thread_local_.interrupt_flags_ &= ~flag;
+ if (!has_pending_interrupts(access)) reset_limits(access);
+}
+
+int StackGuard::FetchAndClearInterrupts() {
+ ExecutionAccess access(isolate_);
+
+ int result = 0;
+ if (thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) {
+ // The TERMINATE_EXECUTION interrupt is special, since it terminates
+ // execution but should leave V8 in a resumable state. If it exists, we only
+ // fetch and clear that bit. On resume, V8 can continue processing other
+ // interrupts.
+ result = TERMINATE_EXECUTION;
+ thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
+ if (!has_pending_interrupts(access)) reset_limits(access);
+ } else {
+ result = thread_local_.interrupt_flags_;
+ thread_local_.interrupt_flags_ = 0;
+ reset_limits(access);
+ }
+
+ return result;
+}
+
+char* StackGuard::ArchiveStackGuard(char* to) {
+ ExecutionAccess access(isolate_);
+ MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ ThreadLocal blank;
+
+ // Set the stack limits using the old thread_local_.
+ // TODO(isolates): This was the old semantics of constructing a ThreadLocal
+ // (as the ctor called SetStackLimits, which looked at the
+ // current thread_local_ from StackGuard)-- but is this
+ // really what was intended?
+ isolate_->heap()->SetStackLimits();
+ thread_local_ = blank;
+
+ return to + sizeof(ThreadLocal);
+}
+
+char* StackGuard::RestoreStackGuard(char* from) {
+ ExecutionAccess access(isolate_);
+ MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ isolate_->heap()->SetStackLimits();
+ return from + sizeof(ThreadLocal);
+}
+
+void StackGuard::FreeThreadResources() {
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindOrAllocatePerThreadDataForThisThread();
+ per_thread->set_stack_limit(thread_local_.real_climit_);
+}
+
+void StackGuard::ThreadLocal::Clear() {
+ real_jslimit_ = kIllegalLimit;
+ set_jslimit(kIllegalLimit);
+ real_climit_ = kIllegalLimit;
+ set_climit(kIllegalLimit);
+ interrupt_scopes_ = nullptr;
+ interrupt_flags_ = 0;
+}
+
+bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
+ bool should_set_stack_limits = false;
+ if (real_climit_ == kIllegalLimit) {
+ const uintptr_t kLimitSize = FLAG_stack_size * KB;
+ DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
+ uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
+ real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
+ set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
+ real_climit_ = limit;
+ set_climit(limit);
+ should_set_stack_limits = true;
+ }
+ interrupt_scopes_ = nullptr;
+ interrupt_flags_ = 0;
+ return should_set_stack_limits;
+}
+
+void StackGuard::ClearThread(const ExecutionAccess& lock) {
+ thread_local_.Clear();
+ isolate_->heap()->SetStackLimits();
+}
+
+void StackGuard::InitThread(const ExecutionAccess& lock) {
+ if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindOrAllocatePerThreadDataForThisThread();
+ uintptr_t stored_limit = per_thread->stack_limit();
+ // You should hold the ExecutionAccess lock when you call this.
+ if (stored_limit != 0) {
+ SetStackLimit(stored_limit);
+ }
+}
+
+// --- C a l l s t o n a t i v e s ---
+
+namespace {
+
+bool TestAndClear(int* bitfield, int mask) {
+ bool result = (*bitfield & mask);
+ *bitfield &= ~mask;
+ return result;
+}
+
+class ShouldBeZeroOnReturnScope final {
+ public:
+#ifndef DEBUG
+ explicit ShouldBeZeroOnReturnScope(int*) {}
+#else // DEBUG
+ explicit ShouldBeZeroOnReturnScope(int* v) : v_(v) {}
+ ~ShouldBeZeroOnReturnScope() { DCHECK_EQ(*v_, 0); }
+
+ private:
+ int* v_;
+#endif // DEBUG
+};
+
+} // namespace
+
+Object StackGuard::HandleInterrupts() {
+ TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");
+
+ if (FLAG_verify_predictable) {
+ // Advance synthetic time by making a time request.
+ isolate_->heap()->MonotonicallyIncreasingTimeInMs();
+ }
+
+ // Fetch and clear interrupt bits in one go. See comments inside the method
+ // for special handling of TERMINATE_EXECUTION.
+ int interrupt_flags = FetchAndClearInterrupts();
+
+ // All interrupts should be fully processed when returning from this method.
+ ShouldBeZeroOnReturnScope should_be_zero_on_return(&interrupt_flags);
+
+ if (TestAndClear(&interrupt_flags, TERMINATE_EXECUTION)) {
+ TRACE_EVENT0("v8.execute", "V8.TerminateExecution");
+ return isolate_->TerminateExecution();
+ }
+
+ if (TestAndClear(&interrupt_flags, GC_REQUEST)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCHandleGCRequest");
+ isolate_->heap()->HandleGCRequest();
+ }
+
+ if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "V8.WasmGrowSharedMemory");
+ isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
+ isolate_);
+ }
+
+ if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "V8.GCDeoptMarkedAllocationSites");
+ isolate_->heap()->DeoptMarkedAllocationSites();
+ }
+
+ if (TestAndClear(&interrupt_flags, INSTALL_CODE)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.InstallOptimizedFunctions");
+ DCHECK(isolate_->concurrent_recompilation_enabled());
+ isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ }
+
+ if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
+ TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
+ // Callbacks must be invoked outside of ExecutionAccess lock.
+ isolate_->InvokeApiInterruptCallbacks();
+ }
+
+ if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "LogCode");
+ isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
+ }
+
+ if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "WasmCodeGC");
+ isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
+ }
+
+ isolate_->counters()->stack_interrupts()->Increment();
+ isolate_->counters()->runtime_profiler_ticks()->Increment();
+ isolate_->runtime_profiler()->MarkCandidatesForOptimization();
+
+ return ReadOnlyRoots(isolate_).undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
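The new stack-guard.cc rests on one trick: requesting an interrupt raises the working jslimit/climit to kInterruptLimit so the very next stack check fails and execution is routed through HandleInterrupts(), after which reset_limits() restores the real values. Below is a minimal sketch of that mechanism with illustrative numbers; DemoGuard is a stand-in invented here, not V8's StackGuard/ThreadLocal.

#include <cstdint>
#include <cstdio>

struct DemoGuard {
  // The "real" limit is the genuine end of usable stack; "limit" is what the
  // generated code actually compares the stack pointer against.
  uintptr_t real_limit;
  uintptr_t limit;
  static constexpr uintptr_t kInterruptLimit = ~uintptr_t{1};  // assumed sentinel value

  void RequestInterrupt() { limit = kInterruptLimit; }  // forces checks to fail
  void ResetLimits() { limit = real_limit; }            // after handling
  bool StackCheckFails(uintptr_t sp) const { return sp < limit; }
};

int main() {
  DemoGuard guard{/*real_limit=*/0x1000, /*limit=*/0x1000};
  uintptr_t sp = 0x8000;  // plenty of stack left
  std::printf("normal: %d\n", guard.StackCheckFails(sp));             // 0
  guard.RequestInterrupt();
  std::printf("interrupt pending: %d\n", guard.StackCheckFails(sp));  // 1
  guard.ResetLimits();
  std::printf("after handling: %d\n", guard.StackCheckFails(sp));     // 0
  return 0;
}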
diff --git a/deps/v8/src/execution/stack-guard.h b/deps/v8/src/execution/stack-guard.h
new file mode 100644
index 0000000000..d7477f1623
--- /dev/null
+++ b/deps/v8/src/execution/stack-guard.h
@@ -0,0 +1,186 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_STACK_GUARD_H_
+#define V8_EXECUTION_STACK_GUARD_H_
+
+#include "include/v8-internal.h"
+#include "src/base/atomicops.h"
+
+namespace v8 {
+namespace internal {
+
+class ExecutionAccess;
+class InterruptsScope;
+class Isolate;
+class Object;
+
+// StackGuard contains the handling of the limits that are used to limit the
+// number of nested invocations of JavaScript and the stack size used in each
+// invocation.
+class V8_EXPORT_PRIVATE StackGuard final {
+ public:
+ explicit StackGuard(Isolate* isolate) : isolate_(isolate) {}
+
+ // Pass the address beyond which the stack should not grow. The stack
+ // is assumed to grow downwards.
+ void SetStackLimit(uintptr_t limit);
+
+ // The simulator uses a separate JS stack. Limits on the JS stack might have
+ // to be adjusted in order to reflect overflows of the C stack, because we
+ // cannot rely on the interleaving of frames on the simulator.
+ void AdjustStackLimitForSimulator();
+
+ // Threading support.
+ char* ArchiveStackGuard(char* to);
+ char* RestoreStackGuard(char* from);
+ static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
+ void FreeThreadResources();
+ // Sets up the default stack guard for this thread if it has not
+ // already been set up.
+ void InitThread(const ExecutionAccess& lock);
+ // Clears the stack guard for this thread so it does not look as if
+ // it has been set up.
+ void ClearThread(const ExecutionAccess& lock);
+
+#define INTERRUPT_LIST(V) \
+ V(TERMINATE_EXECUTION, TerminateExecution, 0) \
+ V(GC_REQUEST, GC, 1) \
+ V(INSTALL_CODE, InstallCode, 2) \
+ V(API_INTERRUPT, ApiInterrupt, 3) \
+ V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) \
+ V(GROW_SHARED_MEMORY, GrowSharedMemory, 5) \
+ V(LOG_WASM_CODE, LogWasmCode, 6) \
+ V(WASM_CODE_GC, WasmCodeGC, 7)
+
+#define V(NAME, Name, id) \
+ inline bool Check##Name() { return CheckInterrupt(NAME); } \
+ inline void Request##Name() { RequestInterrupt(NAME); } \
+ inline void Clear##Name() { ClearInterrupt(NAME); }
+ INTERRUPT_LIST(V)
+#undef V
+
+ // Flag used to set the interrupt causes.
+ enum InterruptFlag {
+#define V(NAME, Name, id) NAME = (1 << id),
+ INTERRUPT_LIST(V)
+#undef V
+#define V(NAME, Name, id) NAME |
+ ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
+#undef V
+ };
+
+ uintptr_t climit() { return thread_local_.climit(); }
+ uintptr_t jslimit() { return thread_local_.jslimit(); }
+ // This provides an asynchronous read of the stack limits for the current
+ // thread. There are no locks protecting this, but it is assumed that you
+ // have the global V8 lock if you are using multiple V8 threads.
+ uintptr_t real_climit() { return thread_local_.real_climit_; }
+ uintptr_t real_jslimit() { return thread_local_.real_jslimit_; }
+ Address address_of_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.jslimit_);
+ }
+ Address address_of_real_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
+ }
+
+ // If the stack guard is triggered, but it is not an actual
+ // stack overflow, then handle the interruption accordingly.
+ Object HandleInterrupts();
+
+ private:
+ bool CheckInterrupt(InterruptFlag flag);
+ void RequestInterrupt(InterruptFlag flag);
+ void ClearInterrupt(InterruptFlag flag);
+ int FetchAndClearInterrupts();
+
+ // You should hold the ExecutionAccess lock when calling this method.
+ bool has_pending_interrupts(const ExecutionAccess& lock) {
+ return thread_local_.interrupt_flags_ != 0;
+ }
+
+ // You should hold the ExecutionAccess lock when calling this method.
+ inline void set_interrupt_limits(const ExecutionAccess& lock);
+
+ // Reset limits to actual values. For example after handling interrupt.
+ // You should hold the ExecutionAccess lock when calling this method.
+ inline void reset_limits(const ExecutionAccess& lock);
+
+ // Enable or disable interrupts.
+ void EnableInterrupts();
+ void DisableInterrupts();
+
+#if V8_TARGET_ARCH_64_BIT
+ static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe};
+ static const uintptr_t kIllegalLimit = uintptr_t{0xfffffffffffffff8};
+#else
+ static const uintptr_t kInterruptLimit = 0xfffffffe;
+ static const uintptr_t kIllegalLimit = 0xfffffff8;
+#endif
+
+ void PushInterruptsScope(InterruptsScope* scope);
+ void PopInterruptsScope();
+
+ class ThreadLocal final {
+ public:
+ ThreadLocal() { Clear(); }
+ // You should hold the ExecutionAccess lock when you call Initialize or
+ // Clear.
+ void Clear();
+
+ // Returns true if the heap's stack limits should be set, false if not.
+ bool Initialize(Isolate* isolate);
+
+ // The stack limit is split into a JavaScript and a C++ stack limit. These
+ // two are the same except when running on a simulator where the C++ and
+ // JavaScript stacks are separate. Each of the two stack limits has two
+ // values. The one with the real_ prefix is the actual stack limit
+ // set for the VM. The one without the real_ prefix has the same value as
+ // the actual stack limit except when there is an interruption (e.g. debug
+ // break or preemption) in which case it is lowered to make stack checks
+ // fail. Both the generated code and the runtime system check against the
+ // one without the real_ prefix.
+ uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
+ uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
+
+ // jslimit_ and climit_ can be read without any lock.
+ // Writing requires the ExecutionAccess lock.
+ base::AtomicWord jslimit_;
+ base::AtomicWord climit_;
+
+ uintptr_t jslimit() {
+ return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
+ }
+ void set_jslimit(uintptr_t limit) {
+ return base::Relaxed_Store(&jslimit_,
+ static_cast<base::AtomicWord>(limit));
+ }
+ uintptr_t climit() {
+ return bit_cast<uintptr_t>(base::Relaxed_Load(&climit_));
+ }
+ void set_climit(uintptr_t limit) {
+ return base::Relaxed_Store(&climit_,
+ static_cast<base::AtomicWord>(limit));
+ }
+
+ InterruptsScope* interrupt_scopes_;
+ int interrupt_flags_;
+ };
+
+ // TODO(isolates): Technically this could be calculated directly from a
+ // pointer to StackGuard.
+ Isolate* isolate_;
+ ThreadLocal thread_local_;
+
+ friend class Isolate;
+ friend class StackLimitCheck;
+ friend class InterruptsScope;
+
+ DISALLOW_COPY_AND_ASSIGN(StackGuard);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_STACK_GUARD_H_
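The header derives both the InterruptFlag bit values and the Check/Request/Clear accessors from the single INTERRUPT_LIST table. A minimal sketch of that X-macro pattern follows, using a shortened, invented list; the real entries and ids are the ones in the hunk above.

#include <cstdio>

#define DEMO_INTERRUPT_LIST(V) \
  V(GC_REQUEST, GC, 1)         \
  V(INSTALL_CODE, InstallCode, 2)

enum DemoInterruptFlag {
#define V(NAME, Name, id) NAME = (1 << id),
  DEMO_INTERRUPT_LIST(V)
#undef V
#define V(NAME, Name, id) NAME |
  ALL_DEMO_INTERRUPTS = DEMO_INTERRUPT_LIST(V) 0
#undef V
};

int main() {
  // Each table entry becomes one bit; the trailing "NAME |" expansion ORs
  // every bit together and terminates the expression with 0.
  std::printf("%d %d %d\n", GC_REQUEST, INSTALL_CODE, ALL_DEMO_INTERRUPTS);  // 2 4 6
  return 0;
}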
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.cc b/deps/v8/src/execution/x64/frame-constants-x64.cc
index 2a55fea9c9..716a6d7082 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.cc
+++ b/deps/v8/src/execution/x64/frame-constants-x64.cc
@@ -8,6 +8,7 @@
#include "src/codegen/x64/assembler-x64-inl.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/OWNERS b/deps/v8/src/extensions/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/extensions/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/extensions/cputracemark-extension.cc b/deps/v8/src/extensions/cputracemark-extension.cc
new file mode 100644
index 0000000000..af85130ee8
--- /dev/null
+++ b/deps/v8/src/extensions/cputracemark-extension.cc
@@ -0,0 +1,56 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/extensions/cputracemark-extension.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Local<v8::FunctionTemplate>
+CpuTraceMarkExtension::GetNativeFunctionTemplate(v8::Isolate* isolate,
+ v8::Local<v8::String> str) {
+ return v8::FunctionTemplate::New(isolate, CpuTraceMarkExtension::Mark);
+}
+
+void CpuTraceMarkExtension::Mark(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() < 1 || !args[0]->IsUint32()) {
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(
+ args.GetIsolate(),
+ "First parameter to cputracemark() must be an unsigned int32.",
+ NewStringType::kNormal)
+ .ToLocalChecked());
+ }
+
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+#if !V8_LIBC_MSVCRT
+ // for non-MSVC builds
+ uint32_t param =
+ args[0]->Uint32Value(args.GetIsolate()->GetCurrentContext()).ToChecked();
+
+ int magic_dummy;
+
+#if defined(__i386__) && defined(__pic__)
+ __asm__ __volatile__("push %%ebx; cpuid; pop %%ebx"
+ : "=a"(magic_dummy)
+ : "a"(0x4711 | ((unsigned)(param) << 16))
+ : "ecx", "edx");
+#else
+ __asm__ __volatile__("cpuid"
+ : "=a"(magic_dummy)
+ : "a"(0x4711 | ((unsigned)(param) << 16))
+ : "ecx", "edx", "ebx");
+#endif // defined(__i386__) && defined(__pic__)
+
+#else
+ // No MSVC build support yet.
+#endif // !V8_LIBC_MSVCRT
+
+#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/extensions/cputracemark-extension.h b/deps/v8/src/extensions/cputracemark-extension.h
new file mode 100644
index 0000000000..9110cfe01b
--- /dev/null
+++ b/deps/v8/src/extensions/cputracemark-extension.h
@@ -0,0 +1,38 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
+#define V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
+
+#include "include/v8.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class CpuTraceMarkExtension : public v8::Extension {
+ public:
+ explicit CpuTraceMarkExtension(const char* fun_name)
+ : v8::Extension("v8/cpumark",
+ BuildSource(buffer_, sizeof(buffer_), fun_name)) {}
+
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
+
+ private:
+ static void Mark(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ static const char* BuildSource(char* buf, size_t size, const char* fun_name) {
+ SNPrintF(Vector<char>(buf, static_cast<int>(size)), "native function %s();",
+ fun_name);
+ return buf;
+ }
+
+ char buffer_[50];
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 458aec38f3..8f897ae97e 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -124,11 +124,12 @@ void StatisticsExtension::GetCounters(
"amount_of_external_allocated_memory");
args.GetReturnValue().Set(result);
- HeapIterator iterator(reinterpret_cast<Isolate*>(args.GetIsolate())->heap());
+ HeapObjectIterator iterator(
+ reinterpret_cast<Isolate*>(args.GetIsolate())->heap());
int reloc_info_total = 0;
int source_position_table_total = 0;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsCode()) {
Code code = Code::cast(obj);
reloc_info_total += code.relocation_info().Size();
diff --git a/deps/v8/src/flags/OWNERS b/deps/v8/src/flags/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/flags/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index 0ef23def1e..40edde3443 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -209,7 +209,9 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
V(harmony_weak_refs, "harmony weak references")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_intl_dateformat_quarter, "Add quarter option to DateTimeFormat")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#endif
@@ -218,11 +220,14 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
#define HARMONY_STAGED_BASE(V)
#ifdef V8_INTL_SUPPORT
-#define HARMONY_STAGED(V) \
- HARMONY_STAGED_BASE(V) \
- V(harmony_intl_add_calendar_numbering_system, \
- "Add calendar and numberingSystem to DateTimeFormat") \
- V(harmony_intl_numberformat_unified, "Unified Intl.NumberFormat Features") \
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
+ V(harmony_intl_add_calendar_numbering_system, \
+ "Add calendar and numberingSystem to DateTimeFormat") \
+ V(harmony_intl_dateformat_day_period, \
+ "Add dayPeriod option to DateTimeFormat") \
+ V(harmony_intl_dateformat_fractional_second_digits, \
+ "Add fractionalSecondDigits option to DateTimeFormat") \
V(harmony_intl_segmenter, "Intl.Segmenter")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
@@ -235,18 +240,16 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_import_meta, "harmony import.meta property") \
V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_global, "harmony global") \
- V(harmony_object_from_entries, "harmony Object.fromEntries()") \
- V(harmony_hashbang, "harmony hashbang syntax") \
V(harmony_numeric_separator, "harmony numeric separator between digits") \
V(harmony_promise_all_settled, "harmony Promise.allSettled")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(harmony_intl_bigint, "BigInt.prototype.toLocaleString") \
- V(harmony_intl_date_format_range, "DateTimeFormat formatRange") \
- V(harmony_intl_datetime_style, "dateStyle timeStyle for DateTimeFormat")
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_intl_bigint, "BigInt.prototype.toLocaleString") \
+ V(harmony_intl_date_format_range, "DateTimeFormat formatRange") \
+ V(harmony_intl_datetime_style, "dateStyle timeStyle for DateTimeFormat") \
+ V(harmony_intl_numberformat_unified, "Unified Intl.NumberFormat Features")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -283,6 +286,12 @@ DEFINE_BOOL(icu_timezone_data, true, "get information about timezones from ICU")
#define V8_ENABLE_RAW_HEAP_SNAPSHOTS_BOOL false
#endif // V8_ENABLE_RAW_HEAP_SNAPSHOTS
+#ifdef V8_ENABLE_DOUBLE_CONST_STORE_CHECK
+#define V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL true
+#else
+#define V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL false
+#endif
+
#ifdef V8_LITE_MODE
#define V8_LITE_BOOL true
#else
@@ -309,6 +318,9 @@ DEFINE_BOOL(future, FUTURE_BOOL,
DEFINE_IMPLICATION(future, write_protect_code_memory)
+DEFINE_BOOL(assert_types, false,
+ "generate runtime type assertions to test the typer")
+
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
@@ -331,6 +343,8 @@ DEFINE_IMPLICATION(track_field_types, track_fields)
DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
DEFINE_BOOL(trace_block_coverage, false,
"trace collected block coverage information")
+DEFINE_BOOL(trace_protector_invalidation, false,
+ "trace protector cell invalidations")
DEFINE_BOOL(feedback_normalization, false,
"feed back normalization to constructors")
// TODO(jkummerow): This currently adds too much load on the stub cache.
@@ -382,8 +396,7 @@ DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_INT(budget_for_feedback_vector_allocation, 1 * KB,
"The budget in amount of bytecode executed by a function before we "
"decide to allocate feedback vectors")
-DEFINE_BOOL(lazy_feedback_allocation, false, "Allocate feedback vectors lazily")
-DEFINE_IMPLICATION(future, lazy_feedback_allocation)
+DEFINE_BOOL(lazy_feedback_allocation, true, "Allocate feedback vectors lazily")
// Flags for Ignition.
DEFINE_BOOL(ignition_elide_noneffectful_bytecodes, true,
@@ -399,6 +412,8 @@ DEFINE_BOOL(print_bytecode, false,
DEFINE_BOOL(enable_lazy_source_positions, false,
"skip generating source positions during initial compile but "
"regenerate when actually required")
+DEFINE_BOOL(stress_lazy_source_positions, false,
+ "collect lazy source positions immediately after lazy compile")
DEFINE_STRING(print_bytecode_filter, "*",
"filter for selecting which functions to print bytecode")
#ifdef V8_TRACE_IGNITION
@@ -476,7 +491,7 @@ DEFINE_BOOL(trace_turbo_trimming, false, "trace TurboFan's graph trimmer")
DEFINE_BOOL(trace_turbo_jt, false, "trace TurboFan's jump threading")
DEFINE_BOOL(trace_turbo_ceq, false, "trace TurboFan's control equivalence")
DEFINE_BOOL(trace_turbo_loop, false, "trace TurboFan's loop optimizations")
-DEFINE_BOOL(trace_alloc, false, "trace register allocator")
+DEFINE_BOOL(trace_turbo_alloc, false, "trace TurboFan's register allocator")
DEFINE_BOOL(trace_all_uses, false, "trace all use positions")
DEFINE_BOOL(trace_representation, false, "trace representation types")
DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
@@ -709,8 +724,7 @@ DEFINE_BOOL(wasm_lazy_validation, false,
DEFINE_NEG_IMPLICATION(wasm_interpret_all, asm_wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_tier_up)
-DEFINE_BOOL(wasm_code_gc, false, "enable garbage collection of wasm code")
-DEFINE_IMPLICATION(future, wasm_code_gc)
+DEFINE_BOOL(wasm_code_gc, true, "enable garbage collection of wasm code")
DEFINE_BOOL(trace_wasm_code_gc, false, "trace garbage collection of wasm code")
DEFINE_BOOL(stress_wasm_code_gc, false,
"stress test garbage collection of wasm code")
@@ -733,11 +747,16 @@ DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
"Grow the new space based on the percentage of survivors instead "
"of their absolute value.")
DEFINE_SIZE_T(max_old_space_size, 0, "max size of the old space (in Mbytes)")
+DEFINE_SIZE_T(
+ max_heap_size, 0,
+ "max size of the heap (in Mbytes); "
+ "both max_semi_space_size and max_old_space_size take precedence. "
+ "All three flags cannot be specified at the same time.")
DEFINE_BOOL(huge_max_old_generation_size, false,
"Increase max size of the old space to 4 GB for x64 systems with"
"the physical memory bigger than 16 GB")
DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
-DEFINE_BOOL(global_gc_scheduling, false,
+DEFINE_BOOL(global_gc_scheduling, true,
"enable GC scheduling based on global memory")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
DEFINE_INT(random_gc_interval, 0,
@@ -760,6 +779,18 @@ DEFINE_BOOL(trace_idle_notification_verbose, false,
DEFINE_BOOL(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_IMPLICATION(trace_gc_verbose, trace_gc)
+DEFINE_BOOL(trace_gc_freelists, false,
+ "prints details of each freelist before and after "
+ "each major garbage collection")
+DEFINE_BOOL(trace_gc_freelists_verbose, false,
+ "prints details of freelists of each page before and after "
+ "each major garbage collection")
+DEFINE_IMPLICATION(trace_gc_freelists_verbose, trace_gc_freelists)
+DEFINE_BOOL(trace_evacuation_candidates, false,
+ "Show statistics about page evacuation during compaction")
+DEFINE_INT(gc_freelist_strategy, 0,
+ "Freelist strategy to use: "
+ "1=FreeListFastAlloc. 2=FreeListMany. Anything else=FreeListLegacy")
DEFINE_INT(trace_allocation_stack_interval, -1,
"print stack trace after <n> free-list allocations")
@@ -910,6 +941,8 @@ DEFINE_BOOL(enable_sse3, true, "enable use of SSE3 instructions if available")
DEFINE_BOOL(enable_ssse3, true, "enable use of SSSE3 instructions if available")
DEFINE_BOOL(enable_sse4_1, true,
"enable use of SSE4.1 instructions if available")
+DEFINE_BOOL(enable_sse4_2, true,
+ "enable use of SSE4.2 instructions if available")
DEFINE_BOOL(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_BOOL(enable_avx, true, "enable use of AVX instructions if available")
@@ -967,6 +1000,8 @@ DEFINE_BOOL(experimental_stack_trace_frames, false,
DEFINE_BOOL(disallow_code_generation_from_strings, false,
"disallow eval and friends")
DEFINE_BOOL(expose_async_hooks, false, "expose async_hooks object")
+DEFINE_STRING(expose_cputracemark_as, nullptr,
+ "expose cputracemark extension under the specified name")
// builtins.cc
DEFINE_BOOL(allow_unsafe_function_constructor, false,
@@ -1184,6 +1219,12 @@ DEFINE_FLOAT(testing_float_flag, 2.5, "float-flag")
DEFINE_STRING(testing_string_flag, "Hello, world!", "string-flag")
DEFINE_INT(testing_prng_seed, 42, "Seed used for threading test randomness")
+// Test flag for a check in %OptimizeFunctionOnNextCall
+DEFINE_BOOL(
+ testing_d8_test_runner, false,
+ "test runner turns on this flag to enable a check that the function was "
+ "prepared for optimization before marking it for optimization")
+
// mksnapshot.cc
DEFINE_STRING(embedded_src, nullptr,
"Path for the generated embedded data file. (mksnapshot only)")
diff --git a/deps/v8/src/handles/OWNERS b/deps/v8/src/handles/OWNERS
index 57fcdd4fac..57f0b54262 100644
--- a/deps/v8/src/handles/OWNERS
+++ b/deps/v8/src/handles/OWNERS
@@ -1,4 +1,7 @@
ishell@chromium.org
jkummerow@chromium.org
mlippautz@chromium.org
+ulan@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/handles/handles.cc b/deps/v8/src/handles/handles.cc
index e0a1f23b7b..7f320a271c 100644
--- a/deps/v8/src/handles/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -33,8 +33,8 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
Object object(*location_);
if (object.IsSmi()) return true;
HeapObject heap_object = HeapObject::cast(object);
- Isolate* isolate;
- if (!GetIsolateFromWritableObject(heap_object, &isolate)) return true;
+ if (IsReadOnlyHeapObject(heap_object)) return true;
+ Isolate* isolate = GetIsolateFromWritableObject(heap_object);
RootIndex root_index;
if (isolate->roots_table().IsRootHandleLocation(location_, &root_index) &&
RootsTable::IsImmortalImmovable(root_index)) {
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index 79eea3aaab..d826296e0c 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
hpayer@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 61b5ba1f8c..65d3f4a732 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -57,8 +57,6 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer buffer) {
heap->update_external_memory(-static_cast<intptr_t>(length));
}
-Space* LocalArrayBufferTracker::space() { return page_->owner(); }
-
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index e8ca57b543..b7950c2506 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -117,8 +117,6 @@ class LocalArrayBufferTracker {
// logic for updating external memory counters.
inline void AddInternal(JSArrayBuffer buffer, size_t length);
- inline Space* space();
-
Page* page_;
// The set contains raw heap pointers which are removed by the GC upon
// processing the tracker through its owning page.
diff --git a/deps/v8/src/heap/basic-memory-chunk.cc b/deps/v8/src/heap/basic-memory-chunk.cc
new file mode 100644
index 0000000000..307f0ec973
--- /dev/null
+++ b/deps/v8/src/heap/basic-memory-chunk.cc
@@ -0,0 +1,54 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/basic-memory-chunk.h"
+
+#include <cstdlib>
+
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/slots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Verify write barrier offsets match the real offsets.
+STATIC_ASSERT(BasicMemoryChunk::Flag::INCREMENTAL_MARKING ==
+ heap_internals::MemoryChunk::kMarkingBit);
+STATIC_ASSERT(BasicMemoryChunk::Flag::FROM_PAGE ==
+ heap_internals::MemoryChunk::kFromPageBit);
+STATIC_ASSERT(BasicMemoryChunk::Flag::TO_PAGE ==
+ heap_internals::MemoryChunk::kToPageBit);
+STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
+ heap_internals::MemoryChunk::kFlagsOffset);
+STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
+ heap_internals::MemoryChunk::kHeapOffset);
+
+BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
+ Address area_end) {
+ const Address base = reinterpret_cast<Address>(this);
+ size_ = size;
+ marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
+ header_sentinel_ = HeapObject::FromAddress(base).ptr();
+ DCHECK(HasHeaderSentinel(area_start));
+ area_start_ = area_start;
+ area_end_ = area_end;
+}
+
+// static
+bool BasicMemoryChunk::HasHeaderSentinel(Address slot_addr) {
+ Address base = BaseAddress(slot_addr);
+ if (slot_addr < base + kHeaderSize) return false;
+ return HeapObject::FromAddress(base) ==
+ ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
+}
+
+void BasicMemoryChunk::ReleaseMarkingBitmap() {
+ DCHECK_NOT_NULL(marking_bitmap_);
+ free(marking_bitmap_);
+ marking_bitmap_ = nullptr;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
new file mode 100644
index 0000000000..65fc072bd2
--- /dev/null
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -0,0 +1,229 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BASIC_MEMORY_CHUNK_H_
+#define V8_HEAP_BASIC_MEMORY_CHUNK_H_
+
+#include <type_traits>
+
+#include "src/base/atomic-utils.h"
+#include "src/common/globals.h"
+#include "src/heap/marking.h"
+
+namespace v8 {
+namespace internal {
+
+class MemoryChunk;
+
+class BasicMemoryChunk {
+ public:
+ enum Flag {
+ NO_FLAGS = 0u,
+ IS_EXECUTABLE = 1u << 0,
+ POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
+ POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
+ // A page in the from-space or a young large page that was not scavenged
+ // yet.
+ FROM_PAGE = 1u << 3,
+ // A page in the to-space or a young large page that was scavenged.
+ TO_PAGE = 1u << 4,
+ LARGE_PAGE = 1u << 5,
+ EVACUATION_CANDIDATE = 1u << 6,
+ NEVER_EVACUATE = 1u << 7,
+
+ // Large objects can have a progress bar in their page header. These objects
+ // are scanned in increments and will be kept black while being scanned.
+ // Even if the mutator writes to them, they will be kept black, and a white-
+ // to-grey transition is performed on the value.
+ HAS_PROGRESS_BAR = 1u << 8,
+
+ // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
+ // from new to old space during evacuation.
+ PAGE_NEW_OLD_PROMOTION = 1u << 9,
+
+ // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
+ // within the new space during evacuation.
+ PAGE_NEW_NEW_PROMOTION = 1u << 10,
+
+ // This flag is intended to be used for testing. Works only when both
+ // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
+ // are set. It forces the page to become an evacuation candidate at next
+ // candidates selection cycle.
+ FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
+
+ // This flag is intended to be used for testing.
+ NEVER_ALLOCATE_ON_PAGE = 1u << 12,
+
+ // The memory chunk is already logically freed, however the actual freeing
+ // still has to be performed.
+ PRE_FREED = 1u << 13,
+
+ // |POOLED|: When actually freeing this chunk, only uncommit and do not
+ // give up the reservation as we still reuse the chunk at some point.
+ POOLED = 1u << 14,
+
+ // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
+ // has been aborted and needs special handling by the sweeper.
+ COMPACTION_WAS_ABORTED = 1u << 15,
+
+ // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
+ // on pages is sometimes aborted. The flag is used to avoid repeatedly
+ // triggering on the same page.
+ COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
+
+ // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
+ // to iterate the page.
+ SWEEP_TO_ITERATE = 1u << 17,
+
+ // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
+ // enabled.
+ INCREMENTAL_MARKING = 1u << 18,
+ NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
+
+ // The memory chunk freeing bookkeeping has been performed but the chunk has
+ // not yet been freed.
+ UNREGISTERED = 1u << 20,
+
+ // The memory chunk belongs to the read-only heap and does not participate
+ // in garbage collection. This is used instead of owner for identity
+ // checking since read-only chunks have no owner once they are detached.
+ READ_ONLY_HEAP = 1u << 21,
+ };
+
+ static const intptr_t kAlignment =
+ (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+ static const intptr_t kAlignmentMask = kAlignment - 1;
+
+ BasicMemoryChunk(size_t size, Address area_start, Address area_end);
+
+ static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
+
+ Address address() const { return reinterpret_cast<Address>(this); }
+
+ size_t size() const { return size_; }
+ void set_size(size_t size) { size_ = size; }
+
+ Address area_start() const { return area_start_; }
+
+ Address area_end() const { return area_end_; }
+ void set_area_end(Address area_end) { area_end_ = area_end; }
+
+ size_t area_size() const {
+ return static_cast<size_t>(area_end() - area_start());
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ void SetFlag(Flag flag) {
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ flags_ |= flag;
+ } else {
+ base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
+ }
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool IsFlagSet(Flag flag) const {
+ return (GetFlags<access_mode>() & flag) != 0;
+ }
+
+ void ClearFlag(Flag flag) { flags_ &= ~flag; }
+
+ // Set or clear multiple flags at a time. The flags in the mask are set to
+ // the value in "flags", the rest retain the current value in |flags_|.
+ void SetFlags(uintptr_t flags, uintptr_t mask) {
+ flags_ = (flags_ & ~mask) | (flags & mask);
+ }
+
+ // Return all current flags.
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ uintptr_t GetFlags() const {
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ return flags_;
+ } else {
+ return base::AsAtomicWord::Relaxed_Load(&flags_);
+ }
+ }
+
+ bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }
+
+ // TODO(v8:7464): Add methods for down casting to MemoryChunk.
+
+ bool Contains(Address addr) const {
+ return addr >= area_start() && addr < area_end();
+ }
+
+ // Checks whether |addr| can be a limit of addresses in this page. It's a
+ // limit if it's in the page, or if it's just after the last byte of the page.
+ bool ContainsLimit(Address addr) const {
+ return addr >= area_start() && addr <= area_end();
+ }
+
+ V8_EXPORT_PRIVATE static bool HasHeaderSentinel(Address slot_addr);
+
+ void ReleaseMarkingBitmap();
+
+ static const intptr_t kSizeOffset = 0;
+ static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
+ static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
+ static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
+ static const intptr_t kHeaderSentinelOffset =
+ kHeapOffset + kSystemPointerSize;
+
+ static const size_t kHeaderSize =
+ kSizeOffset + kSizetSize // size_t size
+ + kUIntptrSize // uintptr_t flags_
+ + kSystemPointerSize // Bitmap* marking_bitmap_
+ + kSystemPointerSize // Heap* heap_
+ + kSystemPointerSize // Address header_sentinel_
+ + kSystemPointerSize // Address area_start_
+ + kSystemPointerSize; // Address area_end_
+
+ protected:
+ // Overall size of the chunk, including the header and guards.
+ size_t size_;
+
+ uintptr_t flags_ = NO_FLAGS;
+
+ Bitmap* marking_bitmap_ = nullptr;
+
+ // TODO(v8:7464): Find a way to remove this.
+ // This goes against the spirit of BasicMemoryChunk, but until C++14/17
+ // is the default it needs to live here because MemoryChunk is not standard
+ // layout under C++11.
+ Heap* heap_;
+
+ // This is used to distinguish the memory chunk header from the interior of a
+ // large page. The memory chunk header stores an impossible tagged pointer
+ // here: the tagged pointer of the page start. A field in a large object is
+ // guaranteed to not contain such a pointer.
+ Address header_sentinel_;
+
+ // Start and end of allocatable memory on this chunk.
+ Address area_start_;
+ Address area_end_;
+
+ friend class BasicMemoryChunkValidator;
+};
+
+STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
+
+class BasicMemoryChunkValidator {
+ // Computed offsets should match the compiler generated ones.
+ STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
+ offsetof(BasicMemoryChunk, size_));
+ STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
+ offsetof(BasicMemoryChunk, flags_));
+ STATIC_ASSERT(BasicMemoryChunk::kMarkBitmapOffset ==
+ offsetof(BasicMemoryChunk, marking_bitmap_));
+ STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
+ offsetof(BasicMemoryChunk, heap_));
+ STATIC_ASSERT(BasicMemoryChunk::kHeaderSentinelOffset ==
+ offsetof(BasicMemoryChunk, header_sentinel_));
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_BASIC_MEMORY_CHUNK_H_
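BasicMemoryChunk::BaseAddress works because chunks are allocated at kAlignment boundaries, so masking off the low bits of any interior address recovers the chunk header; the header sentinel check above then distinguishes a real header from a large-object field that merely happens to lie at that offset. A minimal sketch of the masking arithmetic, with an assumed page-size constant and hypothetical addresses (not V8's actual values):

#include <cstdint>
#include <cstdio>

constexpr unsigned kDemoPageSizeBits = 18;  // assumed for illustration only
constexpr uintptr_t kDemoAlignment = uintptr_t{1} << kDemoPageSizeBits;
constexpr uintptr_t kDemoAlignmentMask = kDemoAlignment - 1;

// Mirrors BaseAddress: clear the low bits to land on the chunk start.
constexpr uintptr_t BaseAddress(uintptr_t a) { return a & ~kDemoAlignmentMask; }

int main() {
  const uintptr_t chunk = 0x40000000;   // hypothetical, kDemoAlignment-aligned
  const uintptr_t slot = chunk + 0x1234;  // some address inside the chunk
  std::printf("chunk=%llx base(slot)=%llx\n",
              static_cast<unsigned long long>(chunk),
              static_cast<unsigned long long>(BaseAddress(slot)));  // both print 40000000
  return 0;
}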
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index cb34d732a4..c6c111bc0e 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -6,7 +6,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
-#include "src/heap/spaces-inl.h" // For HeapObjectIterator.
+#include "src/heap/spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -61,7 +61,7 @@ void CodeStatistics::ResetCodeAndMetadataStatistics(Isolate* isolate) {
// - by code comment (only in debug mode)
void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
Isolate* isolate) {
- HeapObjectIterator obj_it(space);
+ PagedSpaceObjectIterator obj_it(space);
for (HeapObject obj = obj_it.Next(); !obj.is_null(); obj = obj_it.Next()) {
RecordCodeAndMetadataStatistics(obj, isolate);
}
@@ -73,7 +73,7 @@ void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
// - by code comment (only in debug mode)
void CodeStatistics::CollectCodeStatistics(LargeObjectSpace* space,
Isolate* isolate) {
- LargeObjectIterator obj_it(space);
+ LargeObjectSpaceObjectIterator obj_it(space);
for (HeapObject obj = obj_it.Next(); !obj.is_null(); obj = obj_it.Next()) {
RecordCodeAndMetadataStatistics(obj, isolate);
}
diff --git a/deps/v8/src/heap/combined-heap.cc b/deps/v8/src/heap/combined-heap.cc
index ed60b438cb..0416bb62a4 100644
--- a/deps/v8/src/heap/combined-heap.cc
+++ b/deps/v8/src/heap/combined-heap.cc
@@ -3,16 +3,22 @@
// found in the LICENSE file.
#include "src/heap/combined-heap.h"
+#include "src/heap/heap-inl.h"
namespace v8 {
namespace internal {
-HeapObject CombinedHeapIterator::Next() {
+CombinedHeapObjectIterator::CombinedHeapObjectIterator(
+ Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
+ : heap_iterator_(heap, filtering),
+ ro_heap_iterator_(heap->isolate()->read_only_heap()) {}
+
+HeapObject CombinedHeapObjectIterator::Next() {
HeapObject object = ro_heap_iterator_.Next();
if (!object.is_null()) {
return object;
}
- return heap_iterator_.next();
+ return heap_iterator_.Next();
}
} // namespace internal
diff --git a/deps/v8/src/heap/combined-heap.h b/deps/v8/src/heap/combined-heap.h
index c331d95c3d..eaa012ec18 100644
--- a/deps/v8/src/heap/combined-heap.h
+++ b/deps/v8/src/heap/combined-heap.h
@@ -13,21 +13,19 @@ namespace v8 {
namespace internal {
// This class allows iteration over the entire heap (Heap and ReadOnlyHeap). It
-// uses the HeapIterator to iterate over non-read-only objects and accepts the
-// same filtering option. (Interrupting iteration while filtering unreachable
-// objects is still forbidden)
-class V8_EXPORT_PRIVATE CombinedHeapIterator final {
+// uses the HeapObjectIterator to iterate over non-read-only objects and accepts
+// the same filtering option. (Interrupting iteration while filtering
+// unreachable objects is still forbidden)
+class V8_EXPORT_PRIVATE CombinedHeapObjectIterator final {
public:
- CombinedHeapIterator(Heap* heap,
- HeapIterator::HeapObjectsFiltering filtering =
- HeapIterator::HeapObjectsFiltering::kNoFiltering)
- : heap_iterator_(heap, filtering),
- ro_heap_iterator_(heap->read_only_heap()) {}
+ CombinedHeapObjectIterator(
+ Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering =
+ HeapObjectIterator::HeapObjectsFiltering::kNoFiltering);
HeapObject Next();
private:
- HeapIterator heap_iterator_;
- ReadOnlyHeapIterator ro_heap_iterator_;
+ HeapObjectIterator heap_iterator_;
+ ReadOnlyHeapObjectIterator ro_heap_iterator_;
};
V8_WARN_UNUSED_RESULT inline bool IsValidHeapObject(Heap* heap,
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 8ce96428e1..12bb28f1c8 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -121,11 +121,7 @@ class ConcurrentMarkingVisitor final
void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
HeapObject heap_object) {
#ifdef THREAD_SANITIZER
- // Perform a dummy acquire load to tell TSAN that there is no data race
- // in mark-bit initialization. See MemoryChunk::Initialize for the
- // corresponding release store.
- MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object.address());
- CHECK_NOT_NULL(chunk->synchronized_heap());
+ MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
if (marking_state_.IsBlackOrGrey(heap_object)) {
// Weak references with live values are directly processed here to
@@ -247,7 +243,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(weak_cell)) return 0;
int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
- VisitMapPointer(weak_cell, weak_cell.map_slot());
+ VisitMapPointer(weak_cell);
WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
if (weak_cell.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_cell.target());
@@ -306,13 +302,13 @@ class ConcurrentMarkingVisitor final
int VisitSeqOneByteString(Map map, SeqOneByteString object) {
if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
return SeqOneByteString::SizeFor(object.synchronized_length());
}
int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
return SeqTwoByteString::SizeFor(object.synchronized_length());
}
@@ -367,7 +363,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(shared_info)) return 0;
int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
- VisitMapPointer(shared_info, shared_info.map_slot());
+ VisitMapPointer(shared_info);
SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
this);
@@ -385,7 +381,7 @@ class ConcurrentMarkingVisitor final
int VisitBytecodeArray(Map map, BytecodeArray object) {
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
if (!is_forced_gc_) {
object.MakeOlder();
@@ -453,7 +449,7 @@ class ConcurrentMarkingVisitor final
int VisitDescriptorArray(Map map, DescriptorArray array) {
if (!ShouldVisit(array)) return 0;
- VisitMapPointer(array, array.map_slot());
+ VisitMapPointer(array);
int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
VisitPointers(array, array.GetFirstPointerSlot(),
array.GetDescriptorSlot(0));
@@ -463,7 +459,7 @@ class ConcurrentMarkingVisitor final
int VisitTransitionArray(Map map, TransitionArray array) {
if (!ShouldVisit(array)) return 0;
- VisitMapPointer(array, array.map_slot());
+ VisitMapPointer(array);
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
weak_objects_->transition_arrays.Push(task_id_, array);
@@ -528,11 +524,7 @@ class ConcurrentMarkingVisitor final
void MarkObject(HeapObject object) {
#ifdef THREAD_SANITIZER
- // Perform a dummy acquire load to tell TSAN that there is no data race
- // in mark-bit initialization. See MemoryChunk::Initialize for the
- // corresponding release store.
- MemoryChunk* chunk = MemoryChunk::FromAddress(object.address());
- CHECK_NOT_NULL(chunk->synchronized_heap());
+ MemoryChunk::FromHeapObject(object)->SynchronizedHeapLoad();
#endif
if (marking_state_.WhiteToGrey(object)) {
shared_.Push(object);
@@ -631,7 +623,7 @@ class ConcurrentMarkingVisitor final
// Left trimming marks the array black before over-writing the length.
DCHECK(length.IsSmi());
int size = T::SizeFor(Smi::ToInt(length));
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
T::BodyDescriptor::IterateBody(map, object, size, this);
return size;
}
@@ -656,7 +648,7 @@ class ConcurrentMarkingVisitor final
template <typename T, typename TBodyDescriptor>
const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
SlotSnapshottingVisitor visitor(&slot_snapshot_);
- visitor.VisitPointer(object, ObjectSlot(object.map_slot().address()));
+ visitor.VisitPointer(object, object.map_slot());
TBodyDescriptor::IterateBody(map, object, size, &visitor);
return slot_snapshot_;
}
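
The hunks above fold the open-coded TSAN "dummy acquire load" into MemoryChunk::SynchronizedHeapLoad(). The underlying pattern is an acquire load in the marking thread paired with the release store performed when the chunk is initialized, which gives TSAN a happens-before edge for the mark-bit initialization. A minimal, self-contained sketch of that pairing follows; the Chunk type and member names are illustrative, not the V8 API.

#include <atomic>
#include <cassert>

struct Chunk {
  std::atomic<void*> heap{nullptr};  // set once by the allocating thread

  // Release store: performed when the chunk is initialized.
  void Initialize(void* h) { heap.store(h, std::memory_order_release); }

  // Acquire load: performed by marking threads before touching mark bits,
  // establishing a happens-before edge with Initialize().
  void SynchronizedHeapLoad() const {
    assert(heap.load(std::memory_order_acquire) != nullptr);
  }
};
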
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index c032f384b3..ab91367bc6 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -34,7 +34,7 @@ void LocalEmbedderHeapTracer::TraceEpilogue() {
EmbedderHeapTracer::TraceSummary summary;
remote_tracer_->TraceEpilogue(&summary);
- remote_stats_.allocated_size = summary.allocated_size;
+ remote_stats_.used_size = summary.allocated_size;
// Force a check next time increased memory is reported. This allows for
// setting limits close to actual heap sizes.
remote_stats_.allocated_size_limit_for_check = 0;
@@ -118,6 +118,10 @@ void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() {
heap->StartIncrementalMarkingIfAllocationLimitIsReached(
heap->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
+ if (heap->AllocationLimitOvershotByLargeMargin()) {
+ heap->FinalizeIncrementalMarkingAtomically(
+ i::GarbageCollectionReason::kExternalFinalize);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 4309fb722a..eae29cbf5c 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -77,8 +77,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
}
void IncreaseAllocatedSize(size_t bytes) {
+ remote_stats_.used_size += bytes;
remote_stats_.allocated_size += bytes;
- remote_stats_.accumulated_allocated_size += bytes;
if (remote_stats_.allocated_size >
remote_stats_.allocated_size_limit_for_check) {
StartIncrementalMarkingIfNeeded();
@@ -87,12 +87,15 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
}
}
+ void DecreaseAllocatedSize(size_t bytes) {
+ DCHECK_GE(remote_stats_.used_size, bytes);
+ remote_stats_.used_size -= bytes;
+ }
+
void StartIncrementalMarkingIfNeeded();
+ size_t used_size() const { return remote_stats_.used_size; }
size_t allocated_size() const { return remote_stats_.allocated_size; }
- size_t accumulated_allocated_size() const {
- return remote_stats_.accumulated_allocated_size;
- }
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
@@ -109,16 +112,16 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
bool embedder_worklist_empty_ = false;
struct RemoteStatistics {
- // Allocated size of objects in bytes reported by the embedder. Updated via
+ // Used size of objects in bytes reported by the embedder. Updated via
// TraceSummary at the end of tracing and incrementally when the GC is not
// in progress.
+ size_t used_size = 0;
+ // Total bytes allocated by the embedder. Monotonically
+ // increasing value. Used to approximate allocation rate.
size_t allocated_size = 0;
- // Limit for |allocated_size_| in bytes to avoid checking for starting a GC
+ // Limit for |allocated_size| in bytes to avoid checking for starting a GC
// on each increment.
size_t allocated_size_limit_for_check = 0;
- // Totally accumulated bytes allocated by the embedder. Monotonically
- // increasing value. Used to approximate allocation rate.
- size_t accumulated_allocated_size = 0;
} remote_stats_;
friend class EmbedderStackStateScope;
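
With this change the tracer tracks two quantities: used_size, the embedder's current live size (raised by IncreaseAllocatedSize, lowered by DecreaseAllocatedSize, and overwritten from the TraceSummary), and allocated_size, a monotonically increasing counter used to approximate the allocation rate and to decide when to consider starting incremental marking. The standalone sketch below mirrors that accounting; the class name and the limit-update rule are simplifications, not the actual V8 logic, while the 128 KB threshold matches kEmbedderAllocatedThreshold from the hunk above.

#include <cassert>
#include <cstddef>

class EmbedderStats {
 public:
  void IncreaseAllocatedSize(size_t bytes) {
    used_size_ += bytes;
    allocated_size_ += bytes;  // monotonic allocation counter
    if (allocated_size_ > limit_for_check_) {
      // Placeholder for StartIncrementalMarkingIfNeeded(); push the limit out
      // so the check is not repeated on every small increment.
      limit_for_check_ = allocated_size_ + kThreshold;
    }
  }
  void DecreaseAllocatedSize(size_t bytes) {
    assert(used_size_ >= bytes);
    used_size_ -= bytes;  // allocated_size_ never shrinks
  }
  size_t used_size() const { return used_size_; }
  size_t allocated_size() const { return allocated_size_; }

 private:
  static constexpr size_t kThreshold = 128 * 1024;
  size_t used_size_ = 0;
  size_t allocated_size_ = 0;
  size_t limit_for_check_ = 0;
};
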
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 32237da877..9aa705047c 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -104,6 +104,15 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
allocation);
}
+Handle<JSObject> Factory::NewFastOrSlowJSObjectFromMap(
+ Handle<Map> map, int number_of_slow_properties, AllocationType allocation,
+ Handle<AllocationSite> allocation_site) {
+ return map->is_dictionary_map()
+ ? NewSlowJSObjectFromMap(map, number_of_slow_properties,
+ allocation, allocation_site)
+ : NewJSObjectFromMap(map, allocation, allocation_site);
+}
+
Handle<Object> Factory::NewURIError() {
return NewError(isolate()->uri_error_function(),
MessageTemplate::kURIMalformed);
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 03896f7827..19c3665622 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -580,7 +580,7 @@ Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
if (has_different_size_backing_store) {
DCHECK_IMPLIES((boilerplate == (all_properties - index_keys)),
has_seen_proto);
- description->set_backing_store_size(isolate(), backing_store_size);
+ description->set_backing_store_size(backing_store_size);
}
description->set_flags(0);
@@ -1232,8 +1232,8 @@ Handle<String> Factory::NewConsString(Handle<String> left, Handle<String> right,
result->set_hash_field(String::kEmptyHashField);
result->set_length(length);
- result->set_first(isolate(), *left, mode);
- result->set_second(isolate(), *right, mode);
+ result->set_first(*left, mode);
+ result->set_second(*right, mode);
return result;
}
@@ -1314,7 +1314,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
slice->set_hash_field(String::kEmptyHashField);
slice->set_length(length);
- slice->set_parent(isolate(), *str);
+ slice->set_parent(*str);
slice->set_offset(offset);
return slice;
}
@@ -1483,7 +1483,7 @@ Handle<ScriptContextTable> Factory::NewScriptContextTable() {
return context_table;
}
-Handle<Context> Factory::NewModuleContext(Handle<Module> module,
+Handle<Context> Factory::NewModuleContext(Handle<SourceTextModule> module,
Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
@@ -1611,17 +1611,7 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
Handle<Struct> Factory::NewStruct(InstanceType type,
AllocationType allocation) {
- Map map;
- switch (type) {
-#define MAKE_CASE(TYPE, Name, name) \
- case TYPE: \
- map = *name##_map(); \
- break;
- STRUCT_LIST(MAKE_CASE)
-#undef MAKE_CASE
- default:
- UNREACHABLE();
- }
+ Map map = Map::GetStructMap(isolate(), type);
int size = map.instance_size();
HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
Handle<Struct> str(Struct::cast(result), isolate());
@@ -1640,10 +1630,17 @@ Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
Handle<AccessorInfo> Factory::NewAccessorInfo() {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(
NewStruct(ACCESSOR_INFO_TYPE, AllocationType::kOld));
+ DisallowHeapAllocation no_gc;
info->set_name(*empty_string());
info->set_flags(0); // Must clear the flags, it was initialized as undefined.
info->set_is_sloppy(true);
info->set_initial_property_attributes(NONE);
+
+ // Clear some other fields that should not be undefined.
+ info->set_getter(Smi::kZero);
+ info->set_setter(Smi::kZero);
+ info->set_js_getter(Smi::kZero);
+
return info;
}
@@ -1970,15 +1967,15 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
// |layout_descriptor| are set.
map.set_visitor_id(Map::GetVisitorId(map));
map.set_bit_field(0);
- map.set_bit_field2(Map::IsExtensibleBit::kMask);
+ map.set_bit_field2(Map::NewTargetIsBaseBit::encode(true));
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptorsBit::encode(true) |
- Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking) |
+ Map::IsExtensibleBit::encode(true);
map.set_bit_field3(bit_field3);
DCHECK(!map.is_in_retained_map_list());
map.clear_padding();
map.set_elements_kind(elements_kind);
- map.set_new_target_is_base(true);
isolate()->counters()->maps_created()->Increment();
if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
return map;
@@ -2293,9 +2290,9 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
// as the result.
Handle<Object> no_caller;
- MaybeHandle<Object> maybe_error =
- ErrorUtils::Construct(isolate(), constructor, constructor, message,
- SKIP_NONE, no_caller, false);
+ MaybeHandle<Object> maybe_error = ErrorUtils::Construct(
+ isolate(), constructor, constructor, message, SKIP_NONE, no_caller,
+ ErrorUtils::StackTraceCollection::kDetailed);
if (maybe_error.is_null()) {
DCHECK(isolate()->has_pending_exception());
maybe_error = handle(isolate()->pending_exception(), isolate());
@@ -2341,7 +2338,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
Handle<JSFunction> function(JSFunction::cast(New(map, allocation)),
isolate());
- function->initialize_properties();
+ function->initialize_properties(isolate());
function->initialize_elements();
function->set_shared(*info);
function->set_code(info->GetCode());
@@ -2563,9 +2560,10 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
AllocationType::kOld);
}
-Handle<ModuleInfo> Factory::NewModuleInfo() {
- return NewFixedArrayWithMap<ModuleInfo>(
- RootIndex::kModuleInfoMap, ModuleInfo::kLength, AllocationType::kOld);
+Handle<SourceTextModuleInfo> Factory::NewSourceTextModuleInfo() {
+ return NewFixedArrayWithMap<SourceTextModuleInfo>(
+ RootIndex::kModuleInfoMap, SourceTextModuleInfo::kLength,
+ AllocationType::kOld);
}
Handle<PreparseData> Factory::NewPreparseData(int data_length,
@@ -2585,15 +2583,14 @@ Handle<PreparseData> Factory::NewPreparseData(int data_length,
Handle<UncompiledDataWithoutPreparseData>
Factory::NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
int32_t start_position,
- int32_t end_position,
- int32_t function_literal_id) {
+ int32_t end_position) {
Handle<UncompiledDataWithoutPreparseData> result(
UncompiledDataWithoutPreparseData::cast(New(
uncompiled_data_without_preparse_data_map(), AllocationType::kOld)),
isolate());
UncompiledData::Initialize(*result, *inferred_name, start_position,
- end_position, function_literal_id);
+ end_position);
return result;
}
@@ -2601,7 +2598,6 @@ Handle<UncompiledDataWithPreparseData>
Factory::NewUncompiledDataWithPreparseData(Handle<String> inferred_name,
int32_t start_position,
int32_t end_position,
- int32_t function_literal_id,
Handle<PreparseData> preparse_data) {
Handle<UncompiledDataWithPreparseData> result(
UncompiledDataWithPreparseData::cast(
@@ -2609,8 +2605,7 @@ Factory::NewUncompiledDataWithPreparseData(Handle<String> inferred_name,
isolate());
UncompiledDataWithPreparseData::Initialize(
- *result, *inferred_name, start_position, end_position,
- function_literal_id, *preparse_data);
+ *result, *inferred_name, start_position, end_position, *preparse_data);
return result;
}
@@ -2755,7 +2750,7 @@ Handle<JSObject> Factory::NewJSObjectWithNullProto(AllocationType allocation) {
Handle<Map> new_map = Map::Copy(
isolate(), Handle<Map>(result->map(), isolate()), "ObjectWithNullProto");
Map::SetPrototype(isolate(), new_map, null_value());
- JSObject::MigrateToMap(result, new_map);
+ JSObject::MigrateToMap(isolate(), result, new_map);
return result;
}
@@ -2886,12 +2881,14 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
return js_obj;
}
-Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
- AllocationType allocation) {
+Handle<JSObject> Factory::NewSlowJSObjectFromMap(
+ Handle<Map> map, int capacity, AllocationType allocation,
+ Handle<AllocationSite> allocation_site) {
DCHECK(map->is_dictionary_map());
Handle<NameDictionary> object_properties =
NameDictionary::New(isolate(), capacity);
- Handle<JSObject> js_object = NewJSObjectFromMap(map, allocation);
+ Handle<JSObject> js_object =
+ NewJSObjectFromMap(map, allocation, allocation_site);
js_object->set_raw_properties_or_hash(*object_properties);
return js_object;
}
@@ -2910,43 +2907,54 @@ Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
DCHECK(elements->IsNumberDictionary());
object_map =
JSObject::GetElementsTransitionMap(object, DICTIONARY_ELEMENTS);
- JSObject::MigrateToMap(object, object_map);
+ JSObject::MigrateToMap(isolate(), object, object_map);
object->set_elements(*elements);
}
return object;
}
-Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
- AllocationType allocation) {
- NativeContext native_context = isolate()->raw_native_context();
- Map map = native_context.GetInitialJSArrayMap(elements_kind);
- if (map.is_null()) {
- JSFunction array_function = native_context.array_function();
- map = array_function.initial_map();
- }
- return Handle<JSArray>::cast(
- NewJSObjectFromMap(handle(map, isolate()), allocation));
-}
-
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length,
int capacity,
ArrayStorageAllocationMode mode,
AllocationType allocation) {
- Handle<JSArray> array = NewJSArray(elements_kind, allocation);
- NewJSArrayStorage(array, length, capacity, mode);
- return array;
+ DCHECK(capacity >= length);
+ if (capacity == 0) {
+ return NewJSArrayWithElements(empty_fixed_array(), elements_kind, length,
+ allocation);
+ }
+
+ HandleScope inner_scope(isolate());
+ Handle<FixedArrayBase> elms =
+ NewJSArrayStorage(elements_kind, capacity, mode);
+ return inner_scope.CloseAndEscape(NewJSArrayWithUnverifiedElements(
+ elms, elements_kind, length, allocation));
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
int length,
AllocationType allocation) {
- DCHECK(length <= elements->length());
- Handle<JSArray> array = NewJSArray(elements_kind, allocation);
+ Handle<JSArray> array = NewJSArrayWithUnverifiedElements(
+ elements, elements_kind, length, allocation);
+ JSObject::ValidateElements(*array);
+ return array;
+}
+Handle<JSArray> Factory::NewJSArrayWithUnverifiedElements(
+ Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
+ AllocationType allocation) {
+ DCHECK(length <= elements->length());
+ NativeContext native_context = isolate()->raw_native_context();
+ Map map = native_context.GetInitialJSArrayMap(elements_kind);
+ if (map.is_null()) {
+ JSFunction array_function = native_context.array_function();
+ map = array_function.initial_map();
+ }
+ Handle<JSArray> array = Handle<JSArray>::cast(
+ NewJSObjectFromMap(handle(map, isolate()), allocation));
+ DisallowHeapAllocation no_gc;
array->set_elements(*elements);
array->set_length(Smi::FromInt(length));
- JSObject::ValidateElements(*array);
return array;
}
@@ -2961,8 +2969,17 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
}
HandleScope inner_scope(isolate());
+ Handle<FixedArrayBase> elms =
+ NewJSArrayStorage(array->GetElementsKind(), capacity, mode);
+
+ array->set_elements(*elms);
+ array->set_length(Smi::FromInt(length));
+}
+
+Handle<FixedArrayBase> Factory::NewJSArrayStorage(
+ ElementsKind elements_kind, int capacity, ArrayStorageAllocationMode mode) {
+ DCHECK_GT(capacity, 0);
Handle<FixedArrayBase> elms;
- ElementsKind elements_kind = array->GetElementsKind();
if (IsDoubleElementsKind(elements_kind)) {
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
elms = NewFixedDoubleArray(capacity);
@@ -2979,9 +2996,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
elms = NewFixedArrayWithHoles(capacity);
}
}
-
- array->set_elements(*elms);
- array->set_length(Smi::FromInt(length));
+ return elms;
}
Handle<JSWeakMap> Factory::NewJSWeakMap() {
@@ -3020,9 +3035,10 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
return Handle<JSGeneratorObject>::cast(NewJSObjectFromMap(map));
}
-Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
- Handle<ModuleInfo> module_info(code->scope_info().ModuleDescriptorInfo(),
- isolate());
+Handle<SourceTextModule> Factory::NewSourceTextModule(
+ Handle<SharedFunctionInfo> code) {
+ Handle<SourceTextModuleInfo> module_info(
+ code->scope_info().ModuleDescriptorInfo(), isolate());
Handle<ObjectHashTable> exports =
ObjectHashTable::New(isolate(), module_info->RegularExportCount());
Handle<FixedArray> regular_exports =
@@ -3035,8 +3051,10 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
: empty_fixed_array();
ReadOnlyRoots roots(isolate());
- Handle<Module> module =
- Handle<Module>::cast(NewStruct(MODULE_TYPE, AllocationType::kOld));
+ Handle<SourceTextModule> module(
+ SourceTextModule::cast(
+ New(source_text_module_map(), AllocationType::kOld)),
+ isolate());
module->set_code(*code);
module->set_exports(*exports);
module->set_regular_exports(*regular_exports);
@@ -3053,6 +3071,28 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
return module;
}
+Handle<SyntheticModule> Factory::NewSyntheticModule(
+ Handle<String> module_name, Handle<FixedArray> export_names,
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) {
+ ReadOnlyRoots roots(isolate());
+ Handle<SyntheticModule> module(
+ SyntheticModule::cast(New(synthetic_module_map(), AllocationType::kOld)),
+ isolate());
+ Handle<ObjectHashTable> exports =
+ ObjectHashTable::New(isolate(), static_cast<int>(export_names->length()));
+ Handle<Foreign> evaluation_steps_foreign =
+ NewForeign(reinterpret_cast<i::Address>(evaluation_steps));
+ module->set_exports(*exports);
+ module->set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
+ module->set_module_namespace(roots.undefined_value());
+ module->set_status(Module::kUninstantiated);
+ module->set_exception(roots.the_hole_value());
+ module->set_name(*module_name);
+ module->set_export_names(*export_names);
+ module->set_evaluation_steps(*evaluation_steps_foreign);
+ return module;
+}
+
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
AllocationType allocation) {
Handle<JSFunction> array_buffer_fun(
@@ -3274,7 +3314,7 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
DCHECK(map->prototype().IsNull(isolate()));
Handle<JSProxy> result(JSProxy::cast(New(map, AllocationType::kYoung)),
isolate());
- result->initialize_properties();
+ result->initialize_properties(isolate());
result->set_target(*target);
result->set_handler(*handler);
return result;
@@ -3335,10 +3375,12 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
false);
TRACE_EVENT_OBJECT_CREATED_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "SharedFunctionInfo",
- TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope, shared->TraceID()));
+ TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope,
+ shared->TraceID(literal)));
TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "SharedFunctionInfo",
- TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope, shared->TraceID()),
+ TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope,
+ shared->TraceID(literal)),
shared->ToTracedValue(literal));
return shared;
}
@@ -3447,6 +3489,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
*empty_feedback_metadata(), SKIP_WRITE_BARRIER);
}
share->set_script_or_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_function_literal_id(kFunctionLiteralIdInvalid);
#if V8_SFI_HAS_UNIQUE_ID
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
@@ -3639,68 +3682,82 @@ Handle<StackTraceFrame> Factory::NewStackTraceFrame(
return frame;
}
-Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
- Handle<StackFrameInfo> stack_frame_info = Handle<StackFrameInfo>::cast(
- NewStruct(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
- stack_frame_info->set_line_number(0);
- stack_frame_info->set_column_number(0);
- stack_frame_info->set_script_id(0);
- stack_frame_info->set_promise_all_index(-1);
- stack_frame_info->set_script_name(*null_value());
- stack_frame_info->set_script_name_or_source_url(*null_value());
- stack_frame_info->set_function_name(*null_value());
- stack_frame_info->set_flag(0);
- return stack_frame_info;
-}
-
Handle<StackFrameInfo> Factory::NewStackFrameInfo(
Handle<FrameArray> frame_array, int index) {
FrameArrayIterator it(isolate(), frame_array, index);
DCHECK(it.HasFrame());
- Handle<StackFrameInfo> info = NewStackFrameInfo();
- info->set_flag(0);
-
const bool is_wasm = frame_array->IsAnyWasmFrame(index);
- info->set_is_wasm(is_wasm);
+ StackFrameBase* frame = it.Frame();
- // Line numbers are 1-based, for Wasm we need to adjust.
- int line = it.Frame()->GetLineNumber();
- if (is_wasm && line >= 0) line++;
- info->set_line_number(line);
+ int line = frame->GetLineNumber();
+ int column = frame->GetColumnNumber();
- // Column numbers are 1-based. For Wasm we use the position
- // as the iterator does not currently provide a column number.
- const int column =
- is_wasm ? it.Frame()->GetPosition() + 1 : it.Frame()->GetColumnNumber();
- info->set_column_number(column);
+ const int script_id = frame->GetScriptId();
- info->set_script_id(it.Frame()->GetScriptId());
- info->set_script_name(*it.Frame()->GetFileName());
- info->set_script_name_or_source_url(*it.Frame()->GetScriptNameOrSourceUrl());
+ Handle<Object> script_name = frame->GetFileName();
+ Handle<Object> script_or_url = frame->GetScriptNameOrSourceUrl();
// TODO(szuend): Adjust this, once it is decided what name to use in both
// "simple" and "detailed" stack traces. This code is for
// backwards compatibility to fulfill test expectations.
- auto function_name = it.Frame()->GetFunctionName();
+ auto function_name = frame->GetFunctionName();
+ bool is_user_java_script = false;
if (!is_wasm) {
- Handle<Object> function = it.Frame()->GetFunction();
+ Handle<Object> function = frame->GetFunction();
if (function->IsJSFunction()) {
Handle<JSFunction> fun = Handle<JSFunction>::cast(function);
- function_name = JSFunction::GetDebugName(fun);
- const bool is_user_java_script = fun->shared().IsUserJavaScript();
- info->set_is_user_java_script(is_user_java_script);
+ is_user_java_script = fun->shared().IsUserJavaScript();
}
}
+
+ Handle<Object> method_name = undefined_value();
+ Handle<Object> type_name = undefined_value();
+ Handle<Object> eval_origin = frame->GetEvalOrigin();
+ Handle<Object> wasm_module_name = frame->GetWasmModuleName();
+
+ // MethodName and TypeName are expensive to look up, so they are only
+ // included when they are strictly needed by the stack trace
+ // serialization code.
+ // Note: The {is_method_call} predicate needs to be kept in sync with
+ // the corresponding predicate in the stack trace serialization code
+ // in stack-frame-info.cc.
+ const bool is_toplevel = frame->IsToplevel();
+ const bool is_constructor = frame->IsConstructor();
+ const bool is_method_call = !(is_toplevel || is_constructor);
+ if (is_method_call) {
+ method_name = frame->GetMethodName();
+ type_name = frame->GetTypeName();
+ }
+
+ Handle<StackFrameInfo> info = Handle<StackFrameInfo>::cast(
+ NewStruct(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
+
+ DisallowHeapAllocation no_gc;
+
+ info->set_flag(0);
+ info->set_is_wasm(is_wasm);
+ info->set_is_asmjs_wasm(frame_array->IsAsmJsWasmFrame(index));
+ info->set_is_user_java_script(is_user_java_script);
+ info->set_line_number(line);
+ info->set_column_number(column);
+ info->set_script_id(script_id);
+
+ info->set_script_name(*script_name);
+ info->set_script_name_or_source_url(*script_or_url);
info->set_function_name(*function_name);
- info->set_wasm_module_name(*it.Frame()->GetWasmModuleName());
- info->set_is_eval(it.Frame()->IsEval());
- info->set_is_constructor(it.Frame()->IsConstructor());
- info->set_is_toplevel(it.Frame()->IsToplevel());
- info->set_is_async(it.Frame()->IsAsync());
- info->set_is_promise_all(it.Frame()->IsPromiseAll());
- info->set_promise_all_index(it.Frame()->GetPromiseIndex());
+ info->set_method_name(*method_name);
+ info->set_type_name(*type_name);
+ info->set_eval_origin(*eval_origin);
+ info->set_wasm_module_name(*wasm_module_name);
+
+ info->set_is_eval(frame->IsEval());
+ info->set_is_constructor(is_constructor);
+ info->set_is_toplevel(is_toplevel);
+ info->set_is_async(frame->IsAsync());
+ info->set_is_promise_all(frame->IsPromiseAll());
+ info->set_promise_all_index(frame->GetPromiseIndex());
return info;
}
@@ -3785,7 +3842,8 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
return map;
}
-Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
+Handle<LoadHandler> Factory::NewLoadHandler(int data_count,
+ AllocationType allocation) {
Handle<Map> map;
switch (data_count) {
case 1:
@@ -3800,7 +3858,7 @@ Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
default:
UNREACHABLE();
}
- return handle(LoadHandler::cast(New(map, AllocationType::kOld)), isolate());
+ return handle(LoadHandler::cast(New(map, allocation)), isolate());
}
Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 5af2529021..3ccbe6856f 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -53,16 +53,18 @@ class JSSetIterator;
class JSTypedArray;
class JSWeakMap;
class LoadHandler;
-class ModuleInfo;
class NativeContext;
class NewFunctionArgs;
class PreparseData;
class PromiseResolveThenableJobTask;
class RegExpMatchInfo;
class ScriptContextTable;
+class SourceTextModule;
+class SourceTextModuleInfo;
class StackFrameInfo;
class StackTraceFrame;
class StoreHandler;
+class SyntheticModule;
class TemplateObjectDescription;
class UncompiledDataWithoutPreparseData;
class UncompiledDataWithPreparseData;
@@ -406,7 +408,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<ScriptContextTable> NewScriptContextTable();
// Create a module context.
- Handle<Context> NewModuleContext(Handle<Module> module,
+ Handle<Context> NewModuleContext(Handle<SourceTextModule> module,
Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info);
@@ -461,7 +463,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<BreakPoint> NewBreakPoint(int id, Handle<String> condition);
Handle<StackTraceFrame> NewStackTraceFrame(Handle<FrameArray> frame_array,
int index);
- Handle<StackFrameInfo> NewStackFrameInfo();
Handle<StackFrameInfo> NewStackFrameInfo(Handle<FrameArray> frame_array,
int index);
Handle<SourcePositionTableWithFrameCache>
@@ -626,10 +627,19 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSObject> NewJSObjectFromMap(
Handle<Map> map, AllocationType allocation = AllocationType::kYoung,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
+ // Like NewJSObjectFromMap, but includes allocating a properties dictionary.
Handle<JSObject> NewSlowJSObjectFromMap(
Handle<Map> map,
int number_of_slow_properties = NameDictionary::kInitialCapacity,
- AllocationType allocation = AllocationType::kYoung);
+ AllocationType allocation = AllocationType::kYoung,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
+ // Calls NewJSObjectFromMap or NewSlowJSObjectFromMap depending on whether the
+ // map is a dictionary map.
+ inline Handle<JSObject> NewFastOrSlowJSObjectFromMap(
+ Handle<Map> map,
+ int number_of_slow_properties = NameDictionary::kInitialCapacity,
+ AllocationType allocation = AllocationType::kYoung,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
// Allocates and initializes a new JavaScript object with the given
// {prototype} and {properties}. The newly created object will be
// in dictionary properties mode. The {elements} can either be the
@@ -680,7 +690,10 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSModuleNamespace> NewJSModuleNamespace();
- Handle<Module> NewModule(Handle<SharedFunctionInfo> code);
+ Handle<SourceTextModule> NewSourceTextModule(Handle<SharedFunctionInfo> code);
+ Handle<SyntheticModule> NewSyntheticModule(
+ Handle<String> module_name, Handle<FixedArray> export_names,
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps);
Handle<JSArrayBuffer> NewJSArrayBuffer(
SharedFlag shared, AllocationType allocation = AllocationType::kYoung);
@@ -760,19 +773,18 @@ class V8_EXPORT_PRIVATE Factory {
// Create a serialized scope info.
Handle<ScopeInfo> NewScopeInfo(int length);
- Handle<ModuleInfo> NewModuleInfo();
+ Handle<SourceTextModuleInfo> NewSourceTextModuleInfo();
Handle<PreparseData> NewPreparseData(int data_length, int children_length);
Handle<UncompiledDataWithoutPreparseData>
NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
int32_t start_position,
- int32_t end_position,
- int32_t function_literal_id);
+ int32_t end_position);
Handle<UncompiledDataWithPreparseData> NewUncompiledDataWithPreparseData(
Handle<String> inferred_name, int32_t start_position,
- int32_t end_position, int32_t function_literal_id, Handle<PreparseData>);
+ int32_t end_position, Handle<PreparseData>);
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
@@ -884,7 +896,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Map> ObjectLiteralMapFromCache(Handle<NativeContext> native_context,
int number_of_properties);
- Handle<LoadHandler> NewLoadHandler(int data_count);
+ Handle<LoadHandler> NewLoadHandler(
+ int data_count, AllocationType allocation = AllocationType::kOld);
Handle<StoreHandler> NewStoreHandler(int data_count);
Handle<RegExpMatchInfo> NewRegExpMatchInfo();
@@ -1074,11 +1087,20 @@ class V8_EXPORT_PRIVATE Factory {
Handle<String> NumberToStringCacheSet(Handle<Object> number, int hash,
const char* string, bool check_cache);
- // Create a JSArray with no elements and no length.
- Handle<JSArray> NewJSArray(
- ElementsKind elements_kind,
+ // Creates a new JSArray with the given backing storage. Performs no
+ // verification of the backing storage because it may not yet be filled.
+ Handle<JSArray> NewJSArrayWithUnverifiedElements(
+ Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
AllocationType allocation = AllocationType::kYoung);
+ // Creates the backing storage for a JSArray. This handle must be discarded
+ // before returning the JSArray reference to code outside Factory, which might
+ // decide to left-trim the backing store. To avoid unnecessary HandleScopes,
+ // this method requires capacity greater than zero.
+ Handle<FixedArrayBase> NewJSArrayStorage(
+ ElementsKind elements_kind, int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
MaybeHandle<String> name, MaybeHandle<HeapObject> maybe_function_data,
int maybe_builtin_index, FunctionKind kind = kNormalFunction);
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index fab663d767..77e6b99997 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -18,9 +18,9 @@ namespace internal {
static size_t CountTotalHolesSize(Heap* heap) {
size_t holes_size = 0;
- PagedSpaces spaces(heap);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(heap);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
DCHECK_GE(holes_size + space->Waste() + space->Available(), holes_size);
holes_size += space->Waste() + space->Available();
}
@@ -150,9 +150,11 @@ GCTracer::GCTracer(Heap* heap)
allocation_time_ms_(0.0),
new_space_allocation_counter_bytes_(0),
old_generation_allocation_counter_bytes_(0),
+ embedder_allocation_counter_bytes_(0),
allocation_duration_since_gc_(0.0),
new_space_allocation_in_bytes_since_gc_(0),
old_generation_allocation_in_bytes_since_gc_(0),
+ embedder_allocation_in_bytes_since_gc_(0),
combined_mark_compact_speed_cache_(0.0),
start_counter_(0),
average_mutator_duration_(0),
@@ -264,6 +266,12 @@ void GCTracer::Start(GarbageCollector collector,
counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason));
} else {
counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
+
+ if (FLAG_trace_gc_freelists) {
+ PrintIsolate(heap_->isolate(),
+ "FreeLists statistics before collection:\n");
+ heap_->PrintFreeListsStats();
+ }
}
}
@@ -377,6 +385,14 @@ void GCTracer::Stop(GarbageCollector collector) {
}
}
+void GCTracer::NotifySweepingCompleted() {
+ if (FLAG_trace_gc_freelists) {
+ PrintIsolate(heap_->isolate(),
+ "FreeLists statistics after sweeping completed:\n");
+ heap_->PrintFreeListsStats();
+ }
+}
+
void GCTracer::SampleAllocation(double current_ms,
size_t new_space_counter_bytes,
size_t old_generation_counter_bytes,
@@ -948,10 +964,9 @@ double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
}
double GCTracer::EmbedderSpeedInBytesPerMillisecond() const {
- if (recorded_embedder_speed_ != 0.0) {
- return recorded_embedder_speed_;
- }
- return kConservativeSpeedInBytesPerMillisecond;
+ // Note: Returning 0 is ok here as callers check whether embedder speeds
+ // have been recorded at all.
+ return recorded_embedder_speed_;
}
double GCTracer::ScavengeSpeedInBytesPerMillisecond(
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 4ddd0ef1c2..ec54b6c1ab 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -216,6 +216,8 @@ class V8_EXPORT_PRIVATE GCTracer {
// Stop collecting data and print results.
void Stop(GarbageCollector collector);
+ void NotifySweepingCompleted();
+
void NotifyYoungGenerationHandling(
YoungGenerationHandling young_generation_handling);
diff --git a/deps/v8/src/heap/heap-controller.cc b/deps/v8/src/heap/heap-controller.cc
index 77e4870913..d59f8abe9f 100644
--- a/deps/v8/src/heap/heap-controller.cc
+++ b/deps/v8/src/heap/heap-controller.cc
@@ -33,20 +33,20 @@ double MemoryController<Trait>::MaxGrowingFactor(size_t max_heap_size) {
constexpr double kMaxSmallFactor = 2.0;
constexpr double kHighFactor = 4.0;
- size_t max_size_in_mb = max_heap_size / MB;
- max_size_in_mb = Max(max_size_in_mb, Trait::kMinSize);
+ size_t max_size = max_heap_size;
+ max_size = Max(max_size, Trait::kMinSize);
// If we are on a device with lots of memory, we allow a high heap
// growing factor.
- if (max_size_in_mb >= Trait::kMaxSize) {
+ if (max_size >= Trait::kMaxSize) {
return kHighFactor;
}
- DCHECK_GE(max_size_in_mb, Trait::kMinSize);
- DCHECK_LT(max_size_in_mb, Trait::kMaxSize);
+ DCHECK_GE(max_size, Trait::kMinSize);
+ DCHECK_LT(max_size, Trait::kMaxSize);
// On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
- double factor = (max_size_in_mb - Trait::kMinSize) *
+ double factor = (max_size - Trait::kMinSize) *
(kMaxSmallFactor - kMinSmallFactor) /
(Trait::kMaxSize - Trait::kMinSize) +
kMinSmallFactor;
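
MaxGrowingFactor keeps the same shape after the byte-based rewrite: clamp the heap size to at least kMinSize, return kHighFactor at or above kMaxSize, and otherwise interpolate linearly between kMinSmallFactor and kMaxSmallFactor. A standalone sketch of that interpolation follows; the size bounds and kMinSmallFactor are placeholder values, while kMaxSmallFactor (2.0) and kHighFactor (4.0) are taken from the hunk above.

#include <algorithm>
#include <cstddef>

constexpr size_t MB = 1024 * 1024;
constexpr size_t kMinSize = 128 * MB;    // placeholder lower bound
constexpr size_t kMaxSize = 1024 * MB;   // placeholder upper bound
constexpr double kMinSmallFactor = 1.3;  // placeholder
constexpr double kMaxSmallFactor = 2.0;
constexpr double kHighFactor = 4.0;

double MaxGrowingFactor(size_t max_heap_size) {
  size_t max_size = std::max(max_heap_size, kMinSize);
  // Large heaps get the aggressive growing factor.
  if (max_size >= kMaxSize) return kHighFactor;
  // Smaller heaps scale linearly: (X - A) / (B - A) * (D - C) + C.
  return static_cast<double>(max_size - kMinSize) *
             (kMaxSmallFactor - kMinSmallFactor) / (kMaxSize - kMinSize) +
         kMinSmallFactor;
}
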
@@ -126,8 +126,9 @@ size_t MemoryController<Trait>::MinimumAllocationLimitGrowingStep(
template <typename Trait>
size_t MemoryController<Trait>::CalculateAllocationLimit(
- Heap* heap, size_t current_size, size_t max_size, size_t new_space_capacity,
- double factor, Heap::HeapGrowingMode growing_mode) {
+ Heap* heap, size_t current_size, size_t min_size, size_t max_size,
+ size_t new_space_capacity, double factor,
+ Heap::HeapGrowingMode growing_mode) {
switch (growing_mode) {
case Heap::HeapGrowingMode::kConservative:
case Heap::HeapGrowingMode::kSlow:
@@ -155,9 +156,11 @@ size_t MemoryController<Trait>::CalculateAllocationLimit(
static_cast<uint64_t>(current_size) +
MinimumAllocationLimitGrowingStep(growing_mode)) +
new_space_capacity;
+ const uint64_t limit_above_min_size = Max<uint64_t>(limit, min_size);
const uint64_t halfway_to_the_max =
(static_cast<uint64_t>(current_size) + max_size) / 2;
- const size_t result = static_cast<size_t>(Min(limit, halfway_to_the_max));
+ const size_t result =
+ static_cast<size_t>(Min(limit_above_min_size, halfway_to_the_max));
if (FLAG_trace_gc_verbose) {
Isolate::FromHeap(heap)->PrintWithTimestamp(
"[%s] Limit: old size: %zu KB, new limit: %zu KB (%.1f)\n",
diff --git a/deps/v8/src/heap/heap-controller.h b/deps/v8/src/heap/heap-controller.h
index bba1588669..d4a3534cd7 100644
--- a/deps/v8/src/heap/heap-controller.h
+++ b/deps/v8/src/heap/heap-controller.h
@@ -14,9 +14,8 @@ namespace v8 {
namespace internal {
struct BaseControllerTrait {
- // Sizes are in MB.
- static constexpr size_t kMinSize = 128 * Heap::kPointerMultiplier;
- static constexpr size_t kMaxSize = 1024 * Heap::kPointerMultiplier;
+ static constexpr size_t kMinSize = 128u * Heap::kPointerMultiplier * MB;
+ static constexpr size_t kMaxSize = 1024u * Heap::kPointerMultiplier * MB;
static constexpr double kMinGrowingFactor = 1.1;
static constexpr double kMaxGrowingFactor = 4.0;
@@ -43,7 +42,7 @@ class V8_EXPORT_PRIVATE MemoryController : public AllStatic {
double mutator_speed);
static size_t CalculateAllocationLimit(Heap* heap, size_t current_size,
- size_t max_size,
+ size_t min_size, size_t max_size,
size_t new_space_capacity,
double factor,
Heap::HeapGrowingMode growing_mode);
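
The new min_size parameter gives CalculateAllocationLimit a floor: the raw limit (the current size scaled by the growing factor, or the current size plus a minimum growing step, whichever is larger, plus the new-space capacity) is first raised to min_size and then capped at the halfway point between the current size and max_size. A condensed, standalone sketch of that clamping follows; the growing-step value is a placeholder and the growing-mode handling is omitted.

#include <algorithm>
#include <cstddef>
#include <cstdint>

size_t CalculateAllocationLimit(size_t current_size, size_t min_size,
                                size_t max_size, size_t new_space_capacity,
                                double factor) {
  const uint64_t step = 1 * 1024 * 1024;  // placeholder for the growing step
  const uint64_t limit =
      std::max(static_cast<uint64_t>(current_size * factor),
               static_cast<uint64_t>(current_size) + step) +
      new_space_capacity;
  // Never drop below the configured minimum, never jump past the midpoint
  // between the current size and the maximum.
  const uint64_t limit_above_min_size = std::max<uint64_t>(limit, min_size);
  const uint64_t halfway_to_the_max =
      (static_cast<uint64_t>(current_size) + max_size) / 2;
  return static_cast<size_t>(
      std::min(limit_above_min_size, halfway_to_the_max));
}
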
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 4ce35bd961..f2f7a7f692 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -263,15 +263,13 @@ void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
}
bool Heap::CanAllocateInReadOnlySpace() {
- return !deserialization_complete_ &&
- (isolate()->serializer_enabled() ||
- !isolate()->initialized_from_snapshot());
+ return read_only_space()->writable();
}
void Heap::UpdateAllocationsHash(HeapObject object) {
Address object_address = object.address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
- AllocationSpace allocation_space = memory_chunk->owner()->identity();
+ AllocationSpace allocation_space = memory_chunk->owner_identity();
STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
uint32_t value =
@@ -374,13 +372,12 @@ bool Heap::InToPage(HeapObject heap_object) {
bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
// static
-Heap* Heap::FromWritableHeapObject(const HeapObject obj) {
+Heap* Heap::FromWritableHeapObject(HeapObject obj) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
- SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
- static_cast<ReadOnlySpace*>(chunk->owner())->writable());
+ SLOW_DCHECK(chunk->IsWritable());
Heap* heap = chunk->heap();
SLOW_DCHECK(heap != nullptr);
return heap;
@@ -408,7 +405,7 @@ AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
return AllocationMemento();
}
HeapObject candidate = HeapObject::FromAddress(memento_address);
- MapWordSlot candidate_map_slot = candidate.map_slot();
+ ObjectSlot candidate_map_slot = candidate.map_slot();
// This fast check may peek at an uninitialized word. However, the slow check
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
@@ -614,8 +611,8 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
scope_active_(chunk_->heap()->write_protect_code_memory() &&
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (scope_active_) {
- DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
- (chunk_->owner()->identity() == CODE_LO_SPACE));
+ DCHECK(chunk_->owner_identity() == CODE_SPACE ||
+ (chunk_->owner_identity() == CODE_LO_SPACE));
chunk_->SetReadAndWritable();
}
}
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index 6c5f20ac72..5687284b1e 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -11,9 +11,6 @@
#include "src/heap/heap-write-barrier.h"
#include "src/common/globals.h"
-// TODO(jkummerow): Get rid of this by moving GetIsolateFromWritableObject
-// elsewhere.
-#include "src/execution/isolate.h"
#include "src/objects/code.h"
#include "src/objects/compressed-slots-inl.h"
#include "src/objects/fixed-array.h"
@@ -42,27 +39,21 @@ V8_EXPORT_PRIVATE void Heap_MarkingBarrierForDescriptorArraySlow(
Heap* heap, HeapObject host, HeapObject descriptor_array,
int number_of_own_descriptors);
+V8_EXPORT_PRIVATE void Heap_GenerationalEphemeronKeyBarrierSlow(
+ Heap* heap, EphemeronHashTable table, Address slot);
+
// Do not use these internal details anywhere outside of this file. These
// internals are only intended to shortcut write barrier checks.
namespace heap_internals {
-struct Space {
- static constexpr uintptr_t kIdOffset = 9 * kSystemPointerSize;
- V8_INLINE AllocationSpace identity() {
- return *reinterpret_cast<AllocationSpace*>(reinterpret_cast<Address>(this) +
- kIdOffset);
- }
-};
-
struct MemoryChunk {
- static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
+ static constexpr uintptr_t kFlagsOffset = kSizetSize;
static constexpr uintptr_t kHeapOffset =
- kFlagsOffset + kUIntptrSize + 4 * kSystemPointerSize;
- static constexpr uintptr_t kOwnerOffset =
- kHeapOffset + 2 * kSystemPointerSize;
+ kSizetSize + kUIntptrSize + kSystemPointerSize;
static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
+ static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 21;
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
HeapObject object) {
@@ -84,13 +75,12 @@ struct MemoryChunk {
V8_INLINE Heap* GetHeap() {
Heap* heap = *reinterpret_cast<Heap**>(reinterpret_cast<Address>(this) +
kHeapOffset);
- SLOW_DCHECK(heap != nullptr);
+ DCHECK_NOT_NULL(heap);
return heap;
}
- V8_INLINE Space* GetOwner() {
- return *reinterpret_cast<Space**>(reinterpret_cast<Address>(this) +
- kOwnerOffset);
+ V8_INLINE bool InReadOnlySpace() const {
+ return GetFlags() & kReadOnlySpaceBit;
}
};
@@ -122,8 +112,7 @@ inline void GenerationalEphemeronKeyBarrierInternal(EphemeronHashTable table,
return;
}
- Heap* heap = GetHeapFromWritableObject(table);
- heap->RecordEphemeronKeyWrite(table, slot);
+ Heap_GenerationalEphemeronKeyBarrierSlow(table_chunk->GetHeap(), table, slot);
}
inline void MarkingBarrierInternal(HeapObject object, Address slot,
@@ -231,27 +220,16 @@ inline WriteBarrierMode GetWriteBarrierModeForObject(
return UPDATE_WRITE_BARRIER;
}
-inline bool ObjectInYoungGeneration(const Object object) {
+inline bool ObjectInYoungGeneration(Object object) {
if (object.IsSmi()) return false;
return heap_internals::MemoryChunk::FromHeapObject(HeapObject::cast(object))
->InYoungGeneration();
}
-inline Heap* GetHeapFromWritableObject(const HeapObject object) {
+inline bool IsReadOnlyHeapObject(HeapObject object) {
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
- return chunk->GetHeap();
-}
-
-inline bool GetIsolateFromWritableObject(HeapObject obj, Isolate** isolate) {
- heap_internals::MemoryChunk* chunk =
- heap_internals::MemoryChunk::FromHeapObject(obj);
- if (chunk->GetOwner()->identity() == RO_SPACE) {
- *isolate = nullptr;
- return false;
- }
- *isolate = Isolate::FromHeap(chunk->GetHeap());
- return true;
+ return chunk->InReadOnlySpace();
}
} // namespace internal
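
The barrier code no longer needs a Space owner or an Isolate lookup from this header; the only question it still answers locally is whether an object lives in read-only space, and it does so by masking the object address down to its MemoryChunk and testing kReadOnlySpaceBit in the flags word at kFlagsOffset. A standalone sketch of that flag test follows; the chunk alignment constant is a placeholder and the raw pointer arithmetic is illustrative only.

#include <cstddef>
#include <cstdint>

constexpr uintptr_t kChunkAlignment = uintptr_t{1} << 18;  // placeholder
constexpr uintptr_t kFlagsOffset = sizeof(size_t);
constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 21;

bool InReadOnlySpace(uintptr_t object_address) {
  // Chunks are aligned, so the containing chunk is found by masking.
  uintptr_t chunk_base = object_address & ~(kChunkAlignment - 1);
  uintptr_t flags =
      *reinterpret_cast<const uintptr_t*>(chunk_base + kFlagsOffset);
  return (flags & kReadOnlySpaceBit) != 0;
}
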
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
index ead17f9396..1126fd6f4b 100644
--- a/deps/v8/src/heap/heap-write-barrier.h
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -41,7 +41,7 @@ void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
HeapObject descriptor_array,
int number_of_own_descriptors);
-Heap* GetHeapFromWritableObject(const HeapObject object);
+inline bool IsReadOnlyHeapObject(HeapObject object);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 52387b5bc1..7feb1c11ba 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -5,6 +5,7 @@
#include "src/heap/heap.h"
#include <cinttypes>
+#include <iomanip>
#include <unordered_map>
#include <unordered_set>
@@ -63,7 +64,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
@@ -118,6 +119,12 @@ void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
number_of_own_descriptors);
}
+void Heap_GenerationalEphemeronKeyBarrierSlow(Heap* heap,
+ EphemeronHashTable table,
+ Address slot) {
+ heap->RecordEphemeronKeyWrite(table, slot);
+}
+
void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
@@ -164,29 +171,21 @@ struct Heap::StrongRootsList {
class IdleScavengeObserver : public AllocationObserver {
public:
- IdleScavengeObserver(Heap& heap, intptr_t step_size)
+ IdleScavengeObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
- heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
+ heap_->ScheduleIdleScavengeIfNeeded(bytes_allocated);
}
private:
- Heap& heap_;
+ Heap* heap_;
};
Heap::Heap()
: isolate_(isolate()),
- initial_max_old_generation_size_(max_old_generation_size_),
- initial_max_old_generation_size_threshold_(0),
- initial_old_generation_size_(
- Min(max_old_generation_size_, kMaxInitialOldGenerationSize)),
memory_pressure_level_(MemoryPressureLevel::kNone),
- old_generation_allocation_limit_(initial_old_generation_size_),
- global_allocation_limit_(initial_old_generation_size_),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
- current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
- is_current_gc_forced_(false),
external_string_table_(this) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
@@ -207,23 +206,87 @@ size_t Heap::MaxReserved() {
max_old_generation_size_);
}
-size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
- const size_t old_space_physical_memory_factor = 4;
- size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
- old_space_physical_memory_factor *
- kPointerMultiplier);
- size_t max_size_in_mb = V8HeapTrait::kMaxSize;
+size_t Heap::YoungGenerationSizeFromOldGenerationSize(size_t old_generation) {
+ // Compute the semi space size and cap it.
+ size_t ratio = old_generation <= kOldGenerationLowMemory
+ ? kOldGenerationToSemiSpaceRatioLowMemory
+ : kOldGenerationToSemiSpaceRatio;
+ size_t semi_space = old_generation / ratio;
+ semi_space = Min<size_t>(semi_space, kMaxSemiSpaceSize);
+ semi_space = Max<size_t>(semi_space, kMinSemiSpaceSize);
+ semi_space = RoundUp(semi_space, Page::kPageSize);
+ return YoungGenerationSizeFromSemiSpaceSize(semi_space);
+}
+
+size_t Heap::HeapSizeFromPhysicalMemory(uint64_t physical_memory) {
+ // Compute the old generation size and cap it.
+ uint64_t old_generation = physical_memory /
+ kPhysicalMemoryToOldGenerationRatio *
+ kPointerMultiplier;
+ old_generation =
+ Min<uint64_t>(old_generation, MaxOldGenerationSize(physical_memory));
+ old_generation = Max<uint64_t>(old_generation, V8HeapTrait::kMinSize);
+ old_generation = RoundUp(old_generation, Page::kPageSize);
+
+ size_t young_generation = YoungGenerationSizeFromOldGenerationSize(
+ static_cast<size_t>(old_generation));
+ return static_cast<size_t>(old_generation) + young_generation;
+}
+
+void Heap::GenerationSizesFromHeapSize(size_t heap_size,
+ size_t* young_generation_size,
+ size_t* old_generation_size) {
+ // Initialize values for the case when the given heap size is too small.
+ *young_generation_size = 0;
+ *old_generation_size = 0;
+ // Binary search for the largest old generation size that fits into the given
+ // heap limit considering the correspondingly sized young generation.
+ size_t lower = 0, upper = heap_size;
+ while (lower + 1 < upper) {
+ size_t old_generation = lower + (upper - lower) / 2;
+ size_t young_generation =
+ YoungGenerationSizeFromOldGenerationSize(old_generation);
+ if (old_generation + young_generation <= heap_size) {
+ // This size configuration fits into the given heap limit.
+ *young_generation_size = young_generation;
+ *old_generation_size = old_generation;
+ lower = old_generation;
+ } else {
+ upper = old_generation;
+ }
+ }
+}
+size_t Heap::MinYoungGenerationSize() {
+ return YoungGenerationSizeFromSemiSpaceSize(kMinSemiSpaceSize);
+}
+
+size_t Heap::MinOldGenerationSize() {
+ size_t paged_space_count =
+ LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
+ return paged_space_count * Page::kPageSize;
+}
+
+size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) {
+ size_t max_size = V8HeapTrait::kMaxSize;
// Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit
// systems with physical memory bigger than 16GB.
constexpr bool x64_bit = Heap::kPointerMultiplier >= 2;
if (FLAG_huge_max_old_generation_size && x64_bit &&
physical_memory / GB > 16) {
- DCHECK_LE(max_size_in_mb, 4096);
- max_size_in_mb = 4096; // 4GB
+ DCHECK_EQ(max_size / GB, 2);
+ max_size *= 2;
}
+ return max_size;
+}
- return Max(Min(computed_size, max_size_in_mb), V8HeapTrait::kMinSize);
+size_t Heap::YoungGenerationSizeFromSemiSpaceSize(size_t semi_space_size) {
+ return semi_space_size * (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
+}
+
+size_t Heap::SemiSpaceSizeFromYoungGenerationSize(
+ size_t young_generation_size) {
+ return young_generation_size / (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
}
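
The helpers above define the young generation size as semi_space * (2 + kNewLargeObjectSpaceToSemiSpaceRatio), derive the semi-space size from the old generation through a ratio with min/max clamping and page rounding, and let GenerationSizesFromHeapSize binary-search for the largest old generation whose matching young generation still fits in the requested heap size. A self-contained sketch of that search follows; all numeric constants are illustrative placeholders rather than the actual V8 values.

#include <algorithm>
#include <cstddef>

constexpr size_t kPageSize = 256 * 1024;          // placeholder
constexpr size_t kMinSemiSpaceSize = 512 * 1024;  // placeholder
constexpr size_t kMaxSemiSpaceSize = 8u << 20;    // placeholder
constexpr size_t kOldGenToSemiSpaceRatio = 128;   // placeholder
constexpr size_t kNewLoSpaceToSemiSpaceRatio = 1; // placeholder

size_t RoundUpTo(size_t value, size_t granularity) {
  return (value + granularity - 1) / granularity * granularity;
}

size_t YoungGenerationSizeFromOldGenerationSize(size_t old_generation) {
  size_t semi_space = old_generation / kOldGenToSemiSpaceRatio;
  semi_space = std::min(semi_space, kMaxSemiSpaceSize);
  semi_space = std::max(semi_space, kMinSemiSpaceSize);
  semi_space = RoundUpTo(semi_space, kPageSize);
  return semi_space * (2 + kNewLoSpaceToSemiSpaceRatio);
}

void GenerationSizesFromHeapSize(size_t heap_size, size_t* young, size_t* old) {
  *young = 0;
  *old = 0;
  // Find the largest old generation such that old + young(old) <= heap_size.
  size_t lower = 0, upper = heap_size;
  while (lower + 1 < upper) {
    size_t old_candidate = lower + (upper - lower) / 2;
    size_t young_candidate =
        YoungGenerationSizeFromOldGenerationSize(old_candidate);
    if (old_candidate + young_candidate <= heap_size) {
      *young = young_candidate;
      *old = old_candidate;
      lower = old_candidate;
    } else {
      upper = old_candidate;
    }
  }
}
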
size_t Heap::Capacity() {
@@ -234,10 +297,10 @@ size_t Heap::Capacity() {
size_t Heap::OldGenerationCapacity() {
if (!HasBeenSetUp()) return 0;
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
size_t total = 0;
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
total += space->Capacity();
}
return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
@@ -246,10 +309,10 @@ size_t Heap::OldGenerationCapacity() {
size_t Heap::CommittedOldGenerationMemory() {
if (!HasBeenSetUp()) return 0;
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
size_t total = 0;
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
total += space->CommittedMemory();
}
return total + lo_space_->Size() + code_lo_space_->Size();
@@ -273,8 +336,8 @@ size_t Heap::CommittedPhysicalMemory() {
if (!HasBeenSetUp()) return 0;
size_t total = 0;
- for (SpaceIterator it(this); it.has_next();) {
- total += it.next()->CommittedPhysicalMemory();
+ for (SpaceIterator it(this); it.HasNext();) {
+ total += it.Next()->CommittedPhysicalMemory();
}
return total;
@@ -301,8 +364,8 @@ size_t Heap::Available() {
size_t total = 0;
- for (SpaceIterator it(this); it.has_next();) {
- total += it.next()->Available();
+ for (SpaceIterator it(this); it.HasNext();) {
+ total += it.Next()->Available();
}
total += memory_allocator()->Available();
@@ -311,7 +374,7 @@ size_t Heap::Available() {
bool Heap::CanExpandOldGeneration(size_t size) {
if (force_oom_) return false;
- if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
+ if (OldGenerationCapacity() + size > max_old_generation_size_) return false;
// The OldGenerationCapacity does not account compaction spaces used
// during evacuation. Ensure that expanding the old generation does not
// push the total allocated memory size over the maximum heap size.
@@ -443,6 +506,81 @@ void Heap::PrintShortHeapStatistics() {
total_gc_time_ms_);
}
+void Heap::PrintFreeListsStats() {
+ DCHECK(FLAG_trace_gc_freelists);
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ PrintIsolate(isolate_,
+ "Freelists statistics per Page: "
+ "[category: length || total free bytes]\n");
+ }
+
+ std::vector<int> categories_lengths(
+ old_space()->free_list()->number_of_categories(), 0);
+ std::vector<size_t> categories_sums(
+ old_space()->free_list()->number_of_categories(), 0);
+ unsigned int pageCnt = 0;
+
+ // This loop computes freelist lengths and sums.
+ // If FLAG_trace_gc_freelists_verbose is enabled, it also prints
+ // the stats of each FreeListCategory of each Page.
+ for (Page* page : *old_space()) {
+ std::ostringstream out_str;
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ out_str << "Page " << std::setw(4) << pageCnt;
+ }
+
+ for (int cat = kFirstCategory;
+ cat <= old_space()->free_list()->last_category(); cat++) {
+ FreeListCategory* free_list =
+ page->free_list_category(static_cast<FreeListCategoryType>(cat));
+ int length = free_list->FreeListLength();
+ size_t sum = free_list->SumFreeList();
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ out_str << "[" << cat << ": " << std::setw(4) << length << " || "
+ << std::setw(6) << sum << " ]"
+ << (cat == old_space()->free_list()->last_category() ? "\n"
+ : ", ");
+ }
+ categories_lengths[cat] += length;
+ categories_sums[cat] += sum;
+ }
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ PrintIsolate(isolate_, "%s", out_str.str().c_str());
+ }
+
+ pageCnt++;
+ }
+
+ // Print statistics about old_space (pages, free/wasted/used memory...).
+ PrintIsolate(
+ isolate_,
+ "%d pages. Free space: %.1f MB (waste: %.2f). "
+ "Usage: %.1f/%.1f (MB) -> %.2f%%.\n",
+ pageCnt, static_cast<double>(old_space_->Available()) / MB,
+ static_cast<double>(old_space_->Waste()) / MB,
+ static_cast<double>(old_space_->Size()) / MB,
+ static_cast<double>(old_space_->Capacity()) / MB,
+ static_cast<double>(old_space_->Size()) / old_space_->Capacity() * 100);
+
+ // Print global statistics of each FreeListCategory (length & sum).
+ PrintIsolate(isolate_,
+ "FreeLists global statistics: "
+ "[category: length || total free KB]\n");
+ std::ostringstream out_str;
+ for (int cat = kFirstCategory;
+ cat <= old_space()->free_list()->last_category(); cat++) {
+ out_str << "[" << cat << ": " << categories_lengths[cat] << " || "
+ << std::fixed << std::setprecision(2)
+ << static_cast<double>(categories_sums[cat]) / KB << " KB]"
+ << (cat == old_space()->free_list()->last_category() ? "\n" : ", ");
+ }
+ PrintIsolate(isolate_, "%s", out_str.str().c_str());
+}
+
void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
HeapStatistics stats;
reinterpret_cast<v8::Isolate*>(isolate())->GetHeapStatistics(&stats);
@@ -483,7 +621,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
MEMBER("malloced_memory") << stats.malloced_memory() << ","
MEMBER("external_memory") << stats.external_memory() << ","
MEMBER("peak_malloced_memory") << stats.peak_malloced_memory() << ","
- MEMBER("pages") << LIST(
+ MEMBER("spaces") << LIST(
SpaceStatistics(RO_SPACE) << "," <<
SpaceStatistics(NEW_SPACE) << "," <<
SpaceStatistics(OLD_SPACE) << "," <<
@@ -693,8 +831,8 @@ void Heap::GarbageCollectionPrologue() {
size_t Heap::SizeOfObjects() {
size_t total = 0;
- for (SpaceIterator it(this); it.has_next();) {
- total += it.next()->SizeOfObjects();
+ for (SpaceIterator it(this); it.HasNext();) {
+ total += it.Next()->SizeOfObjects();
}
return total;
}
@@ -750,8 +888,8 @@ void Heap::AddAllocationObserversToAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
- for (SpaceIterator it(this); it.has_next();) {
- Space* space = it.next();
+ for (SpaceIterator it(this); it.HasNext();) {
+ Space* space = it.Next();
if (space == new_space()) {
space->AddAllocationObserver(new_space_observer);
} else {
@@ -764,8 +902,8 @@ void Heap::RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
- for (SpaceIterator it(this); it.has_next();) {
- Space* space = it.next();
+ for (SpaceIterator it(this); it.HasNext();) {
+ Space* space = it.Next();
if (space == new_space()) {
space->RemoveAllocationObserver(new_space_observer);
} else {
@@ -1194,27 +1332,27 @@ intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
return 0;
}
-void ReportDuplicates(int size, std::vector<HeapObject>& objects) {
- if (objects.size() == 0) return;
+void ReportDuplicates(int size, std::vector<HeapObject>* objects) {
+ if (objects->size() == 0) return;
- sort(objects.begin(), objects.end(), [size](HeapObject a, HeapObject b) {
+ sort(objects->begin(), objects->end(), [size](HeapObject a, HeapObject b) {
intptr_t c = CompareWords(size, a, b);
if (c != 0) return c < 0;
return a < b;
});
std::vector<std::pair<int, HeapObject>> duplicates;
- HeapObject current = objects[0];
+ HeapObject current = (*objects)[0];
int count = 1;
- for (size_t i = 1; i < objects.size(); i++) {
- if (CompareWords(size, current, objects[i]) == 0) {
+ for (size_t i = 1; i < objects->size(); i++) {
+ if (CompareWords(size, current, (*objects)[i]) == 0) {
count++;
} else {
if (count > 1) {
duplicates.push_back(std::make_pair(count - 1, current));
}
count = 1;
- current = objects[i];
+ current = (*objects)[i];
}
}
if (count > 1) {
@@ -1274,29 +1412,30 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
- new_lo_space_->SetCapacity(new_space_->Capacity());
+ new_lo_space_->SetCapacity(new_space_->Capacity() *
+ kNewLargeObjectSpaceToSemiSpaceRatio);
UncommitFromSpace();
EagerlyFreeExternalMemory();
if (FLAG_trace_duplicate_threshold_kb) {
std::map<int, std::vector<HeapObject>> objects_by_size;
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
- HeapObjectIterator it(space);
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ PagedSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
objects_by_size[obj.Size()].push_back(obj);
}
}
{
- LargeObjectIterator it(lo_space());
+ LargeObjectSpaceObjectIterator it(lo_space());
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
objects_by_size[obj.Size()].push_back(obj);
}
}
for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
++it) {
- ReportDuplicates(it->first, it->second);
+ ReportDuplicates(it->first, &it->second);
}
}
}
@@ -1669,7 +1808,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
for (int space = FIRST_SPACE;
- space < SerializerDeserializer::kNumberOfSpaces; space++) {
+ space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces);
+ space++) {
Reservation* reservation = &reservations[space];
DCHECK_LE(1, reservation->size());
if (reservation->at(0).size == 0) {
@@ -1727,8 +1867,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
Address free_space_address = free_space.address();
CreateFillerObjectAt(free_space_address, size,
ClearRecordedSlots::kNo);
- DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
- space);
+ DCHECK(IsPreAllocatedSpace(static_cast<SnapshotSpace>(space)));
chunk.start = free_space_address;
chunk.end = free_space_address + size;
} else {
@@ -1993,14 +2132,16 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
old_generation_allocation_limit_ =
MemoryController<V8HeapTrait>::CalculateAllocationLimit(
- this, old_gen_size, max_old_generation_size_, new_space_capacity,
- v8_growing_factor, mode);
+ this, old_gen_size, min_old_generation_size_,
+ max_old_generation_size_, new_space_capacity, v8_growing_factor,
+ mode);
if (UseGlobalMemoryScheduling()) {
DCHECK_GT(global_growing_factor, 0);
global_allocation_limit_ =
MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
- this, GlobalSizeOfObjects(), max_global_memory_size_,
- new_space_capacity, global_growing_factor, mode);
+ this, GlobalSizeOfObjects(), min_global_memory_size_,
+ max_global_memory_size_, new_space_capacity,
+ global_growing_factor, mode);
}
CheckIneffectiveMarkCompact(
old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
@@ -2008,8 +2149,9 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
old_generation_size_configured_) {
size_t new_old_generation_limit =
MemoryController<V8HeapTrait>::CalculateAllocationLimit(
- this, old_gen_size, max_old_generation_size_, new_space_capacity,
- v8_growing_factor, mode);
+ this, old_gen_size, min_old_generation_size_,
+ max_old_generation_size_, new_space_capacity, v8_growing_factor,
+ mode);
if (new_old_generation_limit < old_generation_allocation_limit_) {
old_generation_allocation_limit_ = new_old_generation_limit;
}
@@ -2017,8 +2159,9 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
DCHECK_GT(global_growing_factor, 0);
size_t new_global_limit =
MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
- this, GlobalSizeOfObjects(), max_global_memory_size_,
- new_space_capacity, global_growing_factor, mode);
+ this, GlobalSizeOfObjects(), min_global_memory_size_,
+ max_global_memory_size_, new_space_capacity,
+ global_growing_factor, mode);
if (new_global_limit < global_allocation_limit_) {
global_allocation_limit_ = new_global_limit;
}
@@ -2433,8 +2576,8 @@ void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
if (!young_strings_.empty()) {
v->VisitRootPointers(
Root::kExternalStringsTable, nullptr,
- FullObjectSlot(&young_strings_[0]),
- FullObjectSlot(&young_strings_[young_strings_.size()]));
+ FullObjectSlot(young_strings_.data()),
+ FullObjectSlot(young_strings_.data() + young_strings_.size()));
}
}
@@ -2596,6 +2739,7 @@ STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
#endif
#ifdef V8_HOST_ARCH_32_BIT
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
#endif
@@ -2981,7 +3125,7 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
// We do not create a filler for objects in a large object space.
if (!IsLargeObject(object)) {
HeapObject filler =
- CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
+ CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kNo);
DCHECK(!filler.is_null());
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
@@ -3229,7 +3373,8 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// object space for side effects.
IncrementalMarking::MarkingState* marking_state =
incremental_marking()->marking_state();
- for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+ for (int i = OLD_SPACE;
+ i < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces); i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
@@ -3634,8 +3779,8 @@ void Heap::Print() {
if (!HasBeenSetUp()) return;
isolate()->PrintStack(stdout);
- for (SpaceIterator it(this); it.has_next();) {
- it.next()->Print();
+ for (SpaceIterator it(this); it.HasNext();) {
+ it.Next()->Print();
}
}
@@ -3704,6 +3849,9 @@ const char* Heap::GarbageCollectionReasonToString(
}
bool Heap::Contains(HeapObject value) {
+ if (ReadOnlyHeap::Contains(value)) {
+ return false;
+ }
if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
@@ -3736,7 +3884,7 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) {
case NEW_LO_SPACE:
return new_lo_space_->Contains(value);
case RO_SPACE:
- return read_only_space_->Contains(value);
+ return ReadOnlyHeap::Contains(value);
}
UNREACHABLE();
}
@@ -3842,9 +3990,9 @@ void Heap::Verify() {
void Heap::VerifyReadOnlyHeap() {
CHECK(!read_only_space_->writable());
// TODO(v8:7464): Always verify read-only space once PagedSpace::Verify
- // supports verifying shared read-only space. Currently HeapObjectIterator is
- // explicitly disabled for read-only space when sharing is enabled, because it
- // relies on PagedSpace::heap_ being non-null.
+ // supports verifying shared read-only space. Currently
+ // PagedSpaceObjectIterator is explicitly disabled for read-only space when
+ // sharing is enabled, because it relies on PagedSpace::heap_ being non-null.
#ifndef V8_SHARED_RO_HEAP
VerifyReadOnlyPointersVisitor read_only_visitor(this);
read_only_space_->Verify(isolate(), &read_only_visitor);
@@ -3997,17 +4145,17 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->VerifyCountersAfterSweeping();
}
}
void Heap::VerifyCountersBeforeConcurrentSweeping() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->VerifyCountersBeforeConcurrentSweeping();
}
}
@@ -4259,89 +4407,139 @@ void Heap::IterateBuiltins(RootVisitor* v) {
#endif // V8_EMBEDDED_BUILTINS
}
-// TODO(1236194): Since the heap size is configurable on the command line
-// and through the API, we should gracefully handle the case that the heap
-// size is not big enough to fit all the initial objects.
-void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
- size_t max_old_generation_size_in_mb,
- size_t code_range_size_in_mb) {
- // Overwrite default configuration.
- if (max_semi_space_size_in_kb != 0) {
+namespace {
+size_t GlobalMemorySizeFromV8Size(size_t v8_size) {
+ const size_t kGlobalMemoryToV8Ratio = 2;
+ return Min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
+ static_cast<uint64_t>(v8_size) * kGlobalMemoryToV8Ratio);
+}
+} // anonymous namespace
+
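As a worked example of this helper (using the 64-bit defaults set a few lines below): max_old_generation_size_ defaults to 700 * (8 / 4) MB = 1400 MB, so GlobalMemorySizeFromV8Size(1400 MB) yields 2 * 1400 MB = 2800 MB for the combined V8 + embedder limit; the Min() against std::numeric_limits<size_t>::max() only matters where the doubled value would not fit in size_t.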
+void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
+ // Initialize max_semi_space_size_.
+ {
+ max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
+ if (constraints.max_young_generation_size_in_bytes() > 0) {
+ max_semi_space_size_ = SemiSpaceSizeFromYoungGenerationSize(
+ constraints.max_young_generation_size_in_bytes());
+ }
+ if (FLAG_max_semi_space_size > 0) {
+ max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
+ } else if (FLAG_max_heap_size > 0) {
+ size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
+ size_t young_generation_size, old_generation_size;
+ if (FLAG_max_old_space_size > 0) {
+ old_generation_size = static_cast<size_t>(FLAG_max_old_space_size) * MB;
+ young_generation_size = max_heap_size > old_generation_size
+ ? max_heap_size - old_generation_size
+ : 0;
+ } else {
+ GenerationSizesFromHeapSize(max_heap_size, &young_generation_size,
+ &old_generation_size);
+ }
+ max_semi_space_size_ =
+ SemiSpaceSizeFromYoungGenerationSize(young_generation_size);
+ }
+ if (FLAG_stress_compaction) {
+ // This will cause more frequent GCs when stressing.
+ max_semi_space_size_ = MB;
+ }
+ // The new space size must be a power of two to support single-bit testing
+ // for containment.
+    // TODO(ulan): Rounding to a power of 2 is no longer needed. Remove it.
max_semi_space_size_ =
- RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
- }
- if (max_old_generation_size_in_mb != 0) {
- max_old_generation_size_ = max_old_generation_size_in_mb * MB;
+ static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
+ static_cast<uint64_t>(max_semi_space_size_)));
+ max_semi_space_size_ = Max(max_semi_space_size_, kMinSemiSpaceSize);
+ max_semi_space_size_ = RoundDown<Page::kPageSize>(max_semi_space_size_);
}
- // If max space size flags are specified overwrite the configuration.
- if (FLAG_max_semi_space_size > 0) {
- max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
- }
- if (FLAG_max_old_space_size > 0) {
+ // Initialize max_old_generation_size_ and max_global_memory_.
+ {
+ max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
+ if (constraints.max_old_generation_size_in_bytes() > 0) {
+ max_old_generation_size_ = constraints.max_old_generation_size_in_bytes();
+ }
+ if (FLAG_max_old_space_size > 0) {
+ max_old_generation_size_ =
+ static_cast<size_t>(FLAG_max_old_space_size) * MB;
+ } else if (FLAG_max_heap_size > 0) {
+ size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
+ size_t young_generation_size =
+ YoungGenerationSizeFromSemiSpaceSize(max_semi_space_size_);
+ max_old_generation_size_ = max_heap_size > young_generation_size
+ ? max_heap_size - young_generation_size
+ : 0;
+ }
max_old_generation_size_ =
- static_cast<size_t>(FLAG_max_old_space_size) * MB;
- }
-
- if (Page::kPageSize > MB) {
- max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
+ Max(max_old_generation_size_, MinOldGenerationSize());
max_old_generation_size_ =
- RoundUp<Page::kPageSize>(max_old_generation_size_);
- }
+ RoundDown<Page::kPageSize>(max_old_generation_size_);
- if (FLAG_stress_compaction) {
- // This will cause more frequent GCs when stressing.
- max_semi_space_size_ = MB;
+ max_global_memory_size_ =
+ GlobalMemorySizeFromV8Size(max_old_generation_size_);
}
- // The new space size must be a power of two to support single-bit testing
- // for containment.
- max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
- static_cast<uint64_t>(max_semi_space_size_)));
+ CHECK_IMPLIES(FLAG_max_heap_size > 0,
+ FLAG_max_semi_space_size == 0 || FLAG_max_old_space_size == 0);
- if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
- // Start with at least 1*MB semi-space on machines with a lot of memory.
- initial_semispace_size_ =
- Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
- }
-
- if (FLAG_min_semi_space_size > 0) {
- size_t initial_semispace_size =
- static_cast<size_t>(FLAG_min_semi_space_size) * MB;
- if (initial_semispace_size > max_semi_space_size_) {
- initial_semispace_size_ = max_semi_space_size_;
- if (FLAG_trace_gc) {
- PrintIsolate(isolate_,
- "Min semi-space size cannot be more than the maximum "
- "semi-space size of %zu MB\n",
- max_semi_space_size_ / MB);
- }
- } else {
+ // Initialize initial_semispace_size_.
+ {
+ initial_semispace_size_ = kMinSemiSpaceSize;
+ if (max_semi_space_size_ == kMaxSemiSpaceSize) {
+ // Start with at least 1*MB semi-space on machines with a lot of memory.
+ initial_semispace_size_ =
+ Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
+ }
+ if (constraints.initial_young_generation_size_in_bytes() > 0) {
+ initial_semispace_size_ = SemiSpaceSizeFromYoungGenerationSize(
+ constraints.initial_young_generation_size_in_bytes());
+ }
+ if (FLAG_min_semi_space_size > 0) {
initial_semispace_size_ =
- RoundUp<Page::kPageSize>(initial_semispace_size);
+ static_cast<size_t>(FLAG_min_semi_space_size) * MB;
+ }
+ initial_semispace_size_ =
+ Min(initial_semispace_size_, max_semi_space_size_);
+ initial_semispace_size_ =
+ RoundDown<Page::kPageSize>(initial_semispace_size_);
+ }
+
+ // Initialize initial_old_space_size_.
+ {
+ initial_old_generation_size_ = kMaxInitialOldGenerationSize;
+ if (constraints.initial_old_generation_size_in_bytes() > 0) {
+ initial_old_generation_size_ =
+ constraints.initial_old_generation_size_in_bytes();
+ old_generation_size_configured_ = true;
+ }
+ if (FLAG_initial_old_space_size > 0) {
+ initial_old_generation_size_ =
+ static_cast<size_t>(FLAG_initial_old_space_size) * MB;
+ old_generation_size_configured_ = true;
}
+ initial_old_generation_size_ =
+ Min(initial_old_generation_size_, max_old_generation_size_ / 2);
+ initial_old_generation_size_ =
+ RoundDown<Page::kPageSize>(initial_old_generation_size_);
}
- initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
+ if (old_generation_size_configured_) {
+ // If the embedder pre-configures the initial old generation size,
+ // then allow V8 to skip full GCs below that threshold.
+ min_old_generation_size_ = initial_old_generation_size_;
+ min_global_memory_size_ =
+ GlobalMemorySizeFromV8Size(min_old_generation_size_);
+ }
if (FLAG_semi_space_growth_factor < 2) {
FLAG_semi_space_growth_factor = 2;
}
- // The old generation is paged and needs at least one page for each space.
- int paged_space_count =
- LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
- initial_max_old_generation_size_ = max_old_generation_size_ =
- Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
- max_old_generation_size_);
-
- if (FLAG_initial_old_space_size > 0) {
- initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
- } else {
- initial_old_generation_size_ =
- Min(max_old_generation_size_, kMaxInitialOldGenerationSize);
- }
old_generation_allocation_limit_ = initial_old_generation_size_;
+ global_allocation_limit_ =
+ GlobalMemorySizeFromV8Size(old_generation_allocation_limit_);
+ initial_max_old_generation_size_ = max_old_generation_size_;
// We rely on being able to allocate new arrays in paged spaces.
DCHECK(kMaxRegularHeapObjectSize >=
@@ -4349,12 +4547,11 @@ void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
- code_range_size_ = code_range_size_in_mb * MB;
+ code_range_size_ = constraints.code_range_size_in_bytes();
configured_ = true;
}
-
void Heap::AddToRingBuffer(const char* string) {
size_t first_part =
Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
@@ -4378,7 +4575,10 @@ void Heap::GetFromRingBuffer(char* buffer) {
memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
}
-void Heap::ConfigureHeapDefault() { ConfigureHeap(0, 0, 0); }
+void Heap::ConfigureHeapDefault() {
+ v8::ResourceConstraints constraints;
+ ConfigureHeap(constraints);
+}
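To make the new entry point concrete, a minimal embedder-side sketch follows; the set_* names are assumed from include/v8.h of this V8 version and the byte values are arbitrary:

    v8::ResourceConstraints constraints;
    constraints.set_max_old_generation_size_in_bytes(512u * 1024 * 1024);
    constraints.set_max_young_generation_size_in_bytes(16u * 1024 * 1024);
    // Passed in via Isolate::CreateParams, these reach Heap::ConfigureHeap(),
    // which now derives max_old_generation_size_ and max_semi_space_size_
    // from byte values instead of the old (KB, MB, MB) argument triple.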
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->start_marker = HeapStats::kStartMarker;
@@ -4403,9 +4603,9 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
*stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
if (take_snapshot) {
- HeapIterator iterator(this);
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(this);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
InstanceType type = obj.map().instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
stats->objects_per_type[type]++;
@@ -4426,10 +4626,10 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
size_t Heap::OldGenerationSizeOfObjects() {
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
size_t total = 0;
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
total += space->SizeOfObjects();
}
return total + lo_space_->SizeOfObjects();
@@ -4437,10 +4637,9 @@ size_t Heap::OldGenerationSizeOfObjects() {
size_t Heap::GlobalSizeOfObjects() {
const size_t on_heap_size = OldGenerationSizeOfObjects();
- const size_t embedder_size =
- local_embedder_heap_tracer()
- ? local_embedder_heap_tracer()->allocated_size()
- : 0;
+ const size_t embedder_size = local_embedder_heap_tracer()
+ ? local_embedder_heap_tracer()->used_size()
+ : 0;
return on_heap_size + embedder_size;
}
@@ -4455,6 +4654,40 @@ uint64_t Heap::PromotedExternalMemorySize() {
isolate_data->external_memory_at_last_mark_compact_);
}
+bool Heap::AllocationLimitOvershotByLargeMargin() {
+ // This guards against too eager finalization in small heaps.
+ // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
+ constexpr size_t kMarginForSmallHeaps = 32u * MB;
+
+ const size_t v8_overshoot =
+ old_generation_allocation_limit_ <
+ OldGenerationObjectsAndPromotedExternalMemorySize()
+ ? OldGenerationObjectsAndPromotedExternalMemorySize() -
+ old_generation_allocation_limit_
+ : 0;
+ const size_t global_overshoot =
+ global_allocation_limit_ < GlobalSizeOfObjects()
+ ? GlobalSizeOfObjects() - global_allocation_limit_
+ : 0;
+
+ // Bail out if the V8 and global sizes are still below their respective
+ // limits.
+ if (v8_overshoot == 0 && global_overshoot == 0) {
+ return false;
+ }
+
+ // Overshoot margin is 50% of allocation limit or half-way to the max heap
+ // with special handling of small heaps.
+ const size_t v8_margin =
+ Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
+ (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
+ const size_t global_margin =
+ Min(Max(global_allocation_limit_ / 2, kMarginForSmallHeaps),
+ (max_global_memory_size_ - global_allocation_limit_) / 2);
+
+ return v8_overshoot >= v8_margin || global_overshoot >= global_margin;
+}
+
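To make the margins concrete, with illustrative values of old_generation_allocation_limit_ = 64 MB and max_old_generation_size_ = 256 MB: v8_margin = Min(Max(64 MB / 2, 32 MB), (256 MB - 64 MB) / 2) = 32 MB, so the V8 side counts as overshot by a large margin once objects plus promoted external memory reach 96 MB; the global check applies the same formula to the global limit.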
bool Heap::ShouldOptimizeForLoadTime() {
return isolate()->rail_mode() == PERFORMANCE_LOAD &&
!AllocationLimitOvershotByLargeMargin() &&
@@ -4508,7 +4741,7 @@ size_t Heap::GlobalMemoryAvailable() {
? GlobalSizeOfObjects() < global_allocation_limit_
? global_allocation_limit_ - GlobalSizeOfObjects()
: 0
- : 1;
+ : new_space_->Capacity() + 1;
}
// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
@@ -4526,8 +4759,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (FLAG_stress_incremental_marking) {
return IncrementalMarkingLimit::kHardLimit;
}
- if (OldGenerationSizeOfObjects() <=
- IncrementalMarking::kActivationThreshold) {
+ if (incremental_marking()->IsBelowActivationThresholds()) {
// Incremental marking is disabled or it is too early to start.
return IncrementalMarkingLimit::kNoLimit;
}
@@ -4574,7 +4806,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
const size_t global_memory_available = GlobalMemoryAvailable();
if (old_generation_space_available > new_space_->Capacity() &&
- (global_memory_available > 0)) {
+ (global_memory_available > new_space_->Capacity())) {
return IncrementalMarkingLimit::kNoLimit;
}
if (ShouldOptimizeForMemoryUsage()) {
@@ -4609,10 +4841,10 @@ void Heap::DisableInlineAllocation() {
new_space()->UpdateInlineAllocationLimit(0);
// Update inline allocation limit for old spaces.
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
CodeSpaceMemoryModificationScope modification_scope(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->FreeLinearAllocationArea();
}
}
@@ -4769,7 +5001,6 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_space_ != nullptr,
read_only_space_ == ro_heap->read_only_space());
- read_only_heap_ = ro_heap;
space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
}
@@ -4822,7 +5053,7 @@ void Heap::SetUpSpaces() {
if (FLAG_idle_time_scavenge) {
scavenge_job_.reset(new ScavengeJob());
idle_scavenge_observer_.reset(new IdleScavengeObserver(
- *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
+ this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
}
@@ -4831,12 +5062,12 @@ void Heap::SetUpSpaces() {
if (FLAG_stress_marking > 0) {
stress_marking_percentage_ = NextStressMarkingLimit();
- stress_marking_observer_ = new StressMarkingObserver(*this);
+ stress_marking_observer_ = new StressMarkingObserver(this);
AddAllocationObserversToAllSpaces(stress_marking_observer_,
stress_marking_observer_);
}
if (FLAG_stress_scavenge > 0) {
- stress_scavenge_observer_ = new StressScavengeObserver(*this);
+ stress_scavenge_observer_ = new StressScavengeObserver(this);
new_space()->AddAllocationObserver(stress_scavenge_observer_);
}
@@ -4908,8 +5139,8 @@ int Heap::NextStressMarkingLimit() {
}
void Heap::NotifyDeserializationComplete() {
- PagedSpaces spaces(this);
- for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* s = spaces.Next(); s != nullptr; s = spaces.Next()) {
if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
@@ -5055,7 +5286,7 @@ void Heap::TearDown() {
tracer_.reset();
- read_only_heap_->OnHeapTearDown();
+ isolate()->read_only_heap()->OnHeapTearDown();
space_[RO_SPACE] = read_only_space_ = nullptr;
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
@@ -5158,8 +5389,8 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
// Find known PrototypeUsers and compact them.
std::vector<Handle<PrototypeInfo>> prototype_infos;
{
- HeapIterator iterator(this);
- for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
+ HeapObjectIterator iterator(this);
+ for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
if (o.IsPrototypeInfo()) {
PrototypeInfo prototype_info = PrototypeInfo::cast(o);
if (prototype_info.prototype_users().IsWeakArrayList()) {
@@ -5309,7 +5540,7 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
DCHECK(!IsLargeObject(object));
Page* page = Page::FromAddress(slot.address());
if (!page->InYoungGeneration()) {
- DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->DeleteEntry(slot.address());
}
}
@@ -5319,7 +5550,7 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
DCHECK(!IsLargeObject(object));
if (InYoungGeneration(object)) return;
Page* page = Page::FromAddress(slot.address());
- DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
// Old to old slots are filtered with invalidated slots.
@@ -5332,17 +5563,16 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
DCHECK(!page->IsLargePage());
if (!page->InYoungGeneration()) {
- DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->DeleteEntry(start, end);
}
}
-PagedSpace* PagedSpaces::next() {
+PagedSpace* PagedSpaceIterator::Next() {
switch (counter_++) {
case RO_SPACE:
- // skip NEW_SPACE
- counter_++;
- return heap_->read_only_space();
+ case NEW_SPACE:
+ UNREACHABLE();
case OLD_SPACE:
return heap_->old_space();
case CODE_SPACE:
@@ -5359,17 +5589,16 @@ SpaceIterator::SpaceIterator(Heap* heap)
SpaceIterator::~SpaceIterator() = default;
-bool SpaceIterator::has_next() {
+bool SpaceIterator::HasNext() {
// Iterate until no more spaces.
return current_space_ != LAST_SPACE;
}
-Space* SpaceIterator::next() {
- DCHECK(has_next());
+Space* SpaceIterator::Next() {
+ DCHECK(HasNext());
return heap_->space(++current_space_);
}
-
class HeapObjectsFilter {
public:
virtual ~HeapObjectsFilter() = default;
@@ -5486,8 +5715,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
reachable_;
};
-HeapIterator::HeapIterator(Heap* heap,
- HeapIterator::HeapObjectsFiltering filtering)
+HeapObjectIterator::HeapObjectIterator(
+ Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
: heap_(heap),
filtering_(filtering),
filter_(nullptr),
@@ -5503,11 +5732,10 @@ HeapIterator::HeapIterator(Heap* heap,
default:
break;
}
- object_iterator_ = space_iterator_->next()->GetObjectIterator();
+ object_iterator_ = space_iterator_->Next()->GetObjectIterator();
}
-
-HeapIterator::~HeapIterator() {
+HeapObjectIterator::~HeapObjectIterator() {
#ifdef DEBUG
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
@@ -5519,7 +5747,7 @@ HeapIterator::~HeapIterator() {
delete filter_;
}
-HeapObject HeapIterator::next() {
+HeapObject HeapObjectIterator::Next() {
if (filter_ == nullptr) return NextObject();
HeapObject obj = NextObject();
@@ -5527,7 +5755,7 @@ HeapObject HeapIterator::next() {
return obj;
}
-HeapObject HeapIterator::NextObject() {
+HeapObject HeapObjectIterator::NextObject() {
// No iterator means we are done.
if (object_iterator_.get() == nullptr) return HeapObject();
@@ -5537,8 +5765,8 @@ HeapObject HeapIterator::NextObject() {
return obj;
} else {
// Go though the spaces looking for one that has objects.
- while (space_iterator_->has_next()) {
- object_iterator_ = space_iterator_->next()->GetObjectIterator();
+ while (space_iterator_->HasNext()) {
+ object_iterator_ = space_iterator_->Next()->GetObjectIterator();
obj = object_iterator_.get()->Next();
if (!obj.is_null()) {
return obj;
@@ -5686,7 +5914,7 @@ void Heap::AddDirtyJSFinalizationGroup(
// for the root pointing to the first JSFinalizationGroup.
}
-void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
+void Heap::KeepDuringJob(Handle<JSReceiver> target) {
DCHECK(FLAG_harmony_weak_refs);
DCHECK(weak_refs_keep_during_job().IsUndefined() ||
weak_refs_keep_during_job().IsOrderedHashSet());
@@ -5701,7 +5929,7 @@ void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
set_weak_refs_keep_during_job(*table);
}
-void Heap::ClearKeepDuringJobSet() {
+void Heap::ClearKeptObjects() {
set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
}
@@ -5844,7 +6072,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
if (map == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
InstanceType type = map.instance_type();
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
- AllocationSpace src = chunk->owner()->identity();
+ AllocationSpace src = chunk->owner_identity();
switch (src) {
case NEW_SPACE:
return dst == NEW_SPACE || dst == OLD_SPACE;
@@ -5864,7 +6092,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
size_t Heap::EmbedderAllocationCounter() const {
return local_embedder_heap_tracer()
- ? local_embedder_heap_tracer()->accumulated_allocated_size()
+ ? local_embedder_heap_tracer()->allocated_size()
: 0;
}
@@ -6133,16 +6361,16 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
slim_chunk->IsMarking());
- Space* chunk_owner = chunk->owner();
- AllocationSpace identity = chunk_owner->identity();
+ AllocationSpace identity = chunk->owner_identity();
// Generation consistency.
CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
slim_chunk->InYoungGeneration());
+ // Read-only consistency.
+ CHECK_EQ(chunk->InReadOnlySpace(), slim_chunk->InReadOnlySpace());
// Marking consistency.
- if (identity != RO_SPACE ||
- static_cast<ReadOnlySpace*>(chunk->owner())->writable()) {
+ if (chunk->IsWritable()) {
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
@@ -6155,25 +6383,6 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
return true;
}
-static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
- heap_internals::MemoryChunk::kMarkingBit,
- "Incremental marking flag inconsistent");
-static_assert(MemoryChunk::Flag::FROM_PAGE ==
- heap_internals::MemoryChunk::kFromPageBit,
- "From page flag inconsistent");
-static_assert(MemoryChunk::Flag::TO_PAGE ==
- heap_internals::MemoryChunk::kToPageBit,
- "To page flag inconsistent");
-static_assert(MemoryChunk::kFlagsOffset ==
- heap_internals::MemoryChunk::kFlagsOffset,
- "Flag offset inconsistent");
-static_assert(MemoryChunk::kHeapOffset ==
- heap_internals::MemoryChunk::kHeapOffset,
- "Heap offset inconsistent");
-static_assert(MemoryChunk::kOwnerOffset ==
- heap_internals::MemoryChunk::kOwnerOffset,
- "Owner offset inconsistent");
-
void Heap::SetEmbedderStackStateForNextFinalizaton(
EmbedderHeapTracer::EmbedderStackState stack_state) {
local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index a242bd80d1..81f2b0dd8c 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -44,29 +44,20 @@ class HeapTester;
class TestMemoryAllocatorScope;
} // namespace heap
-class ObjectBoilerplateDescription;
-class BytecodeArray;
-class CodeDataContainer;
-class DeoptimizationData;
-class HandlerTable;
class IncrementalMarking;
class JSArrayBuffer;
-class ExternalString;
using v8::MemoryPressureLevel;
class AllocationObserver;
class ArrayBufferCollector;
-class ArrayBufferTracker;
class CodeLargeObjectSpace;
class ConcurrentMarking;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
-class HeapController;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
-class HistogramTimer;
class Isolate;
class JSFinalizationGroup;
class LocalEmbedderHeapTracer;
@@ -86,7 +77,6 @@ class Space;
class StoreBuffer;
class StressScavengeObserver;
class TimedHistogram;
-class TracePossibleWrapperReporter;
class WeakObjectRetainer;
enum ArrayStorageAllocationMode {
@@ -243,19 +233,24 @@ class Heap {
// should instead adapt it's heap size based on available physical memory.
static const int kPointerMultiplier = 1;
#else
- // TODO(ishell): kSystePointerMultiplier?
- static const int kPointerMultiplier = i::kSystemPointerSize / 4;
+ static const int kPointerMultiplier = i::kTaggedSize / 4;
#endif
static const size_t kMaxInitialOldGenerationSize =
256 * MB * kPointerMultiplier;
- // Semi-space size needs to be a multiple of page size.
- static const size_t kMinSemiSpaceSizeInKB = 512 * kPointerMultiplier;
- static const size_t kMaxSemiSpaceSizeInKB = 8192 * kPointerMultiplier;
+ // These constants control heap configuration based on the physical memory.
+ static constexpr size_t kPhysicalMemoryToOldGenerationRatio = 4;
+ static constexpr size_t kOldGenerationToSemiSpaceRatio = 128;
+ static constexpr size_t kOldGenerationToSemiSpaceRatioLowMemory = 256;
+ static constexpr size_t kOldGenerationLowMemory =
+ 128 * MB * kPointerMultiplier;
+ static constexpr size_t kNewLargeObjectSpaceToSemiSpaceRatio = 1;
+ static constexpr size_t kMinSemiSpaceSize = 512 * KB * kPointerMultiplier;
+ static constexpr size_t kMaxSemiSpaceSize = 8192 * KB * kPointerMultiplier;
- STATIC_ASSERT(kMinSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
- STATIC_ASSERT(kMaxSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
+ STATIC_ASSERT(kMinSemiSpaceSize % (1 << kPageSizeBits) == 0);
+ STATIC_ASSERT(kMaxSemiSpaceSize % (1 << kPageSizeBits) == 0);
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
@@ -365,8 +360,8 @@ class Heap {
V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
Address slot,
HeapObject value);
- V8_EXPORT_PRIVATE void RecordEphemeronKeyWrite(EphemeronHashTable table,
- Address key_slot);
+ V8_EXPORT_PRIVATE inline void RecordEphemeronKeyWrite(
+ EphemeronHashTable table, Address key_slot);
V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
Address raw_object, Address address, Isolate* isolate);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
@@ -477,6 +472,12 @@ class Heap {
// Print short heap statistics.
void PrintShortHeapStatistics();
+ // Print statistics of freelists of old_space:
+ // with FLAG_trace_gc_freelists: summary of each FreeListCategory.
+ // with FLAG_trace_gc_freelists_verbose: also prints the statistics of each
+ // FreeListCategory of each page.
+ void PrintFreeListsStats();
+
// Dump heap statistics in JSON format.
void DumpJSONHeapStatistics(std::stringstream& stream);
@@ -571,7 +572,7 @@ class Heap {
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
- int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
+ int64_t external_memory_hard_limit() { return max_old_generation_size_ / 2; }
V8_INLINE int64_t external_memory();
V8_INLINE void update_external_memory(int64_t delta);
@@ -619,13 +620,7 @@ class Heap {
// Initialization. ===========================================================
// ===========================================================================
- // Configure heap sizes
- // max_semi_space_size_in_kb: maximum semi-space size in KB
- // max_old_generation_size_in_mb: maximum old generation size in MB
- // code_range_size_in_mb: code range size in MB
- void ConfigureHeap(size_t max_semi_space_size_in_kb,
- size_t max_old_generation_size_in_mb,
- size_t code_range_size_in_mb);
+ void ConfigureHeap(const v8::ResourceConstraints& constraints);
void ConfigureHeapDefault();
// Prepares the heap, setting up for deserialization.
@@ -681,8 +676,6 @@ class Heap {
// Getters to other components. ==============================================
// ===========================================================================
- ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
-
GCTracer* tracer() { return tracer_.get(); }
MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
@@ -748,8 +741,8 @@ class Heap {
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot);
- V8_EXPORT_PRIVATE void AddKeepDuringJobTarget(Handle<JSReceiver> target);
- void ClearKeepDuringJobSet();
+ V8_EXPORT_PRIVATE void KeepDuringJob(Handle<JSReceiver> target);
+ void ClearKeptObjects();
// ===========================================================================
// Inline allocation. ========================================================
@@ -986,8 +979,9 @@ class Heap {
// Returns whether the object resides in old space.
inline bool InOldSpace(Object object);
- // Checks whether an address/object in the heap (including auxiliary
- // area and unused area).
+ // Checks whether an address/object is in the non-read-only heap (including
+ // auxiliary area and unused area). Use IsValidHeapObject if checking both
+ // heaps is required.
V8_EXPORT_PRIVATE bool Contains(HeapObject value);
// Checks whether an address/object in a space.
@@ -998,7 +992,7 @@ class Heap {
// with off-heap Addresses.
bool InSpaceSlow(Address addr, AllocationSpace space);
- static inline Heap* FromWritableHeapObject(const HeapObject obj);
+ static inline Heap* FromWritableHeapObject(HeapObject obj);
// ===========================================================================
// Object statistics tracking. ===============================================
@@ -1042,23 +1036,21 @@ class Heap {
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
- V8_EXPORT_PRIVATE static size_t ComputeMaxOldGenerationSize(
+ V8_EXPORT_PRIVATE static size_t HeapSizeFromPhysicalMemory(
+ uint64_t physical_memory);
+ V8_EXPORT_PRIVATE static void GenerationSizesFromHeapSize(
+ size_t heap_size, size_t* young_generation_size,
+ size_t* old_generation_size);
+ V8_EXPORT_PRIVATE static size_t YoungGenerationSizeFromOldGenerationSize(
+ size_t old_generation_size);
+ V8_EXPORT_PRIVATE static size_t YoungGenerationSizeFromSemiSpaceSize(
+ size_t semi_space_size);
+ V8_EXPORT_PRIVATE static size_t SemiSpaceSizeFromYoungGenerationSize(
+ size_t young_generation_size);
+ V8_EXPORT_PRIVATE static size_t MinYoungGenerationSize();
+ V8_EXPORT_PRIVATE static size_t MinOldGenerationSize();
+ V8_EXPORT_PRIVATE static size_t MaxOldGenerationSize(
uint64_t physical_memory);
-
- static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
- const uint64_t min_physical_memory = 512 * MB;
- const uint64_t max_physical_memory = 3 * static_cast<uint64_t>(GB);
-
- uint64_t capped_physical_memory =
- Max(Min(physical_memory, max_physical_memory), min_physical_memory);
- // linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
- size_t semi_space_size_in_kb =
- static_cast<size_t>(((capped_physical_memory - min_physical_memory) *
- (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
- (max_physical_memory - min_physical_memory) +
- kMinSemiSpaceSizeInKB);
- return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
- }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -1185,6 +1177,11 @@ class Heap {
V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();
+ // We allow incremental marking to overshoot the V8 and global allocation
+  // limit for performance reasons. If the overshoot is too large then we are
+ // more eager to finalize incremental marking.
+ bool AllocationLimitOvershotByLargeMargin();
+
// ===========================================================================
// Prologue/epilogue callback methods.========================================
// ===========================================================================
@@ -1655,26 +1652,6 @@ class Heap {
OldGenerationObjectsAndPromotedExternalMemorySize());
}
- // We allow incremental marking to overshoot the allocation limit for
- // performace reasons. If the overshoot is too large then we are more
- // eager to finalize incremental marking.
- inline bool AllocationLimitOvershotByLargeMargin() {
- // This guards against too eager finalization in small heaps.
- // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
- size_t kMarginForSmallHeaps = 32u * MB;
- if (old_generation_allocation_limit_ >=
- OldGenerationObjectsAndPromotedExternalMemorySize())
- return false;
- uint64_t overshoot = OldGenerationObjectsAndPromotedExternalMemorySize() -
- old_generation_allocation_limit_;
- // Overshoot margin is 50% of allocation limit or half-way to the max heap
- // with special handling of small heaps.
- uint64_t margin =
- Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
- (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
- return overshoot >= margin;
- }
-
void UpdateTotalGCTime(double duration);
bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
@@ -1708,6 +1685,8 @@ class Heap {
return old_generation_allocation_limit_;
}
+ size_t global_allocation_limit() const { return global_allocation_limit_; }
+
bool always_allocate() { return always_allocate_scope_count_ != 0; }
V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
@@ -1816,18 +1795,25 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_ = nullptr;
+ // These limits are initialized in Heap::ConfigureHeap based on the resource
+ // constraints and flags.
size_t code_range_size_ = 0;
- size_t max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
- size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
- size_t max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
- // TODO(mlippautz): Clarify whether this should be take some embedder
+ size_t max_semi_space_size_ = 0;
+ size_t initial_semispace_size_ = 0;
+ // Full garbage collections can be skipped if the old generation size
+ // is below this threshold.
+ size_t min_old_generation_size_ = 0;
+ // If the old generation size exceeds this limit, then V8 will
+ // crash with out-of-memory error.
+ size_t max_old_generation_size_ = 0;
+ // TODO(mlippautz): Clarify whether this should take some embedder
// configurable limit into account.
- size_t max_global_memory_size_ =
- Min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
- static_cast<uint64_t>(max_old_generation_size_) * 2);
- size_t initial_max_old_generation_size_;
- size_t initial_max_old_generation_size_threshold_;
- size_t initial_old_generation_size_;
+ size_t min_global_memory_size_ = 0;
+ size_t max_global_memory_size_ = 0;
+
+ size_t initial_max_old_generation_size_ = 0;
+ size_t initial_max_old_generation_size_threshold_ = 0;
+ size_t initial_old_generation_size_ = 0;
bool old_generation_size_configured_ = false;
size_t maximum_committed_ = 0;
size_t old_generation_capacity_after_bootstrap_ = 0;
@@ -1861,8 +1847,6 @@ class Heap {
// and after context disposal.
int number_of_disposed_maps_ = 0;
- ReadOnlyHeap* read_only_heap_ = nullptr;
-
NewSpace* new_space_ = nullptr;
OldSpace* old_space_ = nullptr;
CodeSpace* code_space_ = nullptr;
@@ -1932,8 +1916,8 @@ class Heap {
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
- size_t old_generation_allocation_limit_;
- size_t global_allocation_limit_;
+ size_t old_generation_allocation_limit_ = 0;
+ size_t global_allocation_limit_ = 0;
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
@@ -2034,9 +2018,10 @@ class Heap {
// Currently set GC callback flags that are used to pass information between
// the embedder and V8's GC.
- GCCallbackFlags current_gc_callback_flags_;
+ GCCallbackFlags current_gc_callback_flags_ =
+ GCCallbackFlags::kNoGCCallbackFlags;
- bool is_current_gc_forced_;
+ bool is_current_gc_forced_ = false;
ExternalStringTable external_string_table_;
@@ -2082,7 +2067,7 @@ class Heap {
friend class ConcurrentMarking;
friend class GCCallbacksScope;
friend class GCTracer;
- friend class HeapIterator;
+ friend class HeapObjectIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
@@ -2115,9 +2100,6 @@ class Heap {
// Used in cctest.
friend class heap::HeapTester;
- FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
- FRIEND_TEST(HeapTest, ExternalLimitDefault);
- FRIEND_TEST(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling);
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2245,56 +2227,56 @@ class VerifySmisVisitor : public RootVisitor {
};
// Space iterator for iterating over all the paged spaces of the heap: Map
-// space, old space, code space and optionally read only space. Returns each
-// space in turn, and null when it is done.
-class V8_EXPORT_PRIVATE PagedSpaces {
+// space, old space and code space. Returns each space in turn, and null when it
+// is done.
+class V8_EXPORT_PRIVATE PagedSpaceIterator {
public:
- explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
- PagedSpace* next();
+ explicit PagedSpaceIterator(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
+ PagedSpace* Next();
private:
Heap* heap_;
int counter_;
};
-
-class SpaceIterator : public Malloced {
+class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
public:
explicit SpaceIterator(Heap* heap);
virtual ~SpaceIterator();
- bool has_next();
- Space* next();
+ bool HasNext();
+ Space* Next();
private:
Heap* heap_;
int current_space_; // from enum AllocationSpace.
};
-// A HeapIterator provides iteration over the entire non-read-only heap. It
-// aggregates the specific iterators for the different spaces as these can only
-// iterate over one space only.
+// A HeapObjectIterator provides iteration over the entire non-read-only heap.
+// It aggregates the specific iterators for the different spaces as these can
+// only iterate over one space.
//
-// HeapIterator ensures there is no allocation during its lifetime (using an
-// embedded DisallowHeapAllocation instance).
+// HeapObjectIterator ensures there is no allocation during its lifetime (using
+// an embedded DisallowHeapAllocation instance).
//
-// HeapIterator can skip free list nodes (that is, de-allocated heap objects
-// that still remain in the heap). As implementation of free nodes filtering
-// uses GC marks, it can't be used during MS/MC GC phases. Also, it is forbidden
-// to interrupt iteration in this mode, as this will leave heap objects marked
-// (and thus, unusable).
+// HeapObjectIterator can skip free list nodes (that is, de-allocated heap
+// objects that still remain in the heap). As implementation of free nodes
+// filtering uses GC marks, it can't be used during MS/MC GC phases. Also, it is
+// forbidden to interrupt iteration in this mode, as this will leave heap
+// objects marked (and thus, unusable).
//
-// See ReadOnlyHeapIterator if you need to iterate over read-only space objects,
-// or CombinedHeapIterator if you need to iterate over both heaps.
-class V8_EXPORT_PRIVATE HeapIterator {
+// See ReadOnlyHeapObjectIterator if you need to iterate over read-only space
+// objects, or CombinedHeapObjectIterator if you need to iterate over both
+// heaps.
+class V8_EXPORT_PRIVATE HeapObjectIterator {
public:
enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
- explicit HeapIterator(Heap* heap,
- HeapObjectsFiltering filtering = kNoFiltering);
- ~HeapIterator();
+ explicit HeapObjectIterator(Heap* heap,
+ HeapObjectsFiltering filtering = kNoFiltering);
+ ~HeapObjectIterator();
- HeapObject next();
+ HeapObject Next();
private:
HeapObject NextObject();
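The rename keeps the iteration protocol intact; only the class name and the casing of Next() change. The usage pattern, as applied throughout heap.cc in this patch, is:

    HeapObjectIterator iterator(heap);
    for (HeapObject obj = iterator.Next(); !obj.is_null(); obj = iterator.Next()) {
      // visit obj; allocation stays disallowed for the iterator's lifetime
    }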
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 4a901dc17a..2980bdc8d4 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -37,14 +37,14 @@ using IncrementalMarkingMarkingVisitor =
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
size_t size) {
- Heap* heap = incremental_marking_.heap();
+ Heap* heap = incremental_marking_->heap();
VMState<GC> state(heap->isolate());
RuntimeCallTimerScope runtime_timer(
heap->isolate(),
RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
- incremental_marking_.AdvanceOnAllocation();
+ incremental_marking_->AdvanceOnAllocation();
// AdvanceIncrementalMarkingOnAllocation can start incremental marking.
- incremental_marking_.EnsureBlackAllocated(addr, size);
+ incremental_marking_->EnsureBlackAllocated(addr, size);
}
IncrementalMarking::IncrementalMarking(
@@ -64,8 +64,8 @@ IncrementalMarking::IncrementalMarking(
black_allocation_(false),
finalize_marking_completed_(false),
request_type_(NONE),
- new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
- old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
+ new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
+ old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
DCHECK_NOT_NULL(marking_worklist_);
SetState(STOPPED);
}
@@ -246,6 +246,10 @@ bool IncrementalMarking::CanBeActivated() {
!heap_->isolate()->serializer_enabled();
}
+bool IncrementalMarking::IsBelowActivationThresholds() const {
+ return heap_->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
+ heap_->GlobalSizeOfObjects() <= kGlobalActivationThreshold;
+}
void IncrementalMarking::Deactivate() {
DeactivateIncrementalWriteBarrier();
@@ -253,16 +257,23 @@ void IncrementalMarking::Deactivate() {
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
- int old_generation_size_mb =
- static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
- int old_generation_limit_mb =
- static_cast<int>(heap()->old_generation_allocation_limit() / MB);
+ const size_t old_generation_size_mb =
+ heap()->OldGenerationSizeOfObjects() / MB;
+ const size_t old_generation_limit_mb =
+ heap()->old_generation_allocation_limit() / MB;
+ const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
+ const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
heap()->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
- "slack %dMB\n",
+ "[IncrementalMarking] Start (%s): (size/limit/slack) v8: %zuMB / %zuMB "
+ "/ %zuMB global: %zuMB / %zuMB / %zuMB\n",
Heap::GarbageCollectionReasonToString(gc_reason),
old_generation_size_mb, old_generation_limit_mb,
- Max(0, old_generation_limit_mb - old_generation_size_mb));
+ old_generation_size_mb > old_generation_limit_mb
+ ? 0
+ : old_generation_limit_mb - old_generation_size_mb,
+ global_size_mb, global_limit_mb,
+ global_size_mb > global_limit_mb ? 0
+ : global_limit_mb - global_size_mb);
}
DCHECK(FLAG_incremental_marking);
DCHECK(state_ == STOPPED);
@@ -827,8 +838,8 @@ void IncrementalMarking::Stop() {
}
SpaceIterator it(heap_);
- while (it.has_next()) {
- Space* space = it.next();
+ while (it.HasNext()) {
+ Space* space = it.Next();
if (space == heap_->new_space()) {
space->RemoveAllocationObserver(&new_generation_observer_);
} else {
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 7284034191..74bb7cfd5a 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -79,9 +79,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static constexpr double kMaxStepSizeInMs = 5;
#ifndef DEBUG
- static const intptr_t kActivationThreshold = 8 * MB;
+ static constexpr size_t kV8ActivationThreshold = 8 * MB;
+ static constexpr size_t kGlobalActivationThreshold = 16 * MB;
#else
- static const intptr_t kActivationThreshold = 0;
+ static constexpr size_t kV8ActivationThreshold = 0;
+ static constexpr size_t kGlobalActivationThreshold = 0;
#endif
#ifdef V8_CONCURRENT_MARKING
@@ -248,17 +250,19 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// generation.
void EnsureBlackAllocated(Address allocated, size_t size);
+ bool IsBelowActivationThresholds() const;
+
private:
class Observer : public AllocationObserver {
public:
- Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
+ Observer(IncrementalMarking* incremental_marking, intptr_t step_size)
: AllocationObserver(step_size),
incremental_marking_(incremental_marking) {}
void Step(int bytes_allocated, Address, size_t) override;
private:
- IncrementalMarking& incremental_marking_;
+ IncrementalMarking* incremental_marking_;
};
void StartMarking();
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
index 1945e3275a..001f40193a 100644
--- a/deps/v8/src/heap/item-parallel-job.cc
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -26,8 +26,12 @@ void ItemParallelJob::Task::SetupInternal(base::Semaphore* on_finish,
}
}
+void ItemParallelJob::Task::WillRunOnForeground() {
+ runner_ = Runner::kForeground;
+}
+
void ItemParallelJob::Task::RunInternal() {
- RunInParallel();
+ RunInParallel(runner_);
on_finish_->Signal();
}
@@ -95,6 +99,7 @@ void ItemParallelJob::Run() {
// Contribute on main thread.
DCHECK(main_task);
+ main_task->WillRunOnForeground();
main_task->Run();
// Wait for background tasks.
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 54f09b87b5..0b739f8987 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -65,10 +65,11 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
class V8_EXPORT_PRIVATE Task : public CancelableTask {
public:
+ enum class Runner { kForeground, kBackground };
explicit Task(Isolate* isolate);
~Task() override = default;
- virtual void RunInParallel() = 0;
+ virtual void RunInParallel(Runner runner) = 0;
protected:
// Retrieves a new item that needs to be processed. Returns |nullptr| if
@@ -99,13 +100,14 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
// processing, e.g. scavenging).
void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
size_t start_index);
-
+ void WillRunOnForeground();
// We don't allow overriding this method any further.
void RunInternal() final;
std::vector<Item*>* items_ = nullptr;
size_t cur_index_ = 0;
size_t items_considered_ = 0;
+ Runner runner_ = Runner::kBackground;
base::Semaphore* on_finish_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(Task);
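A sketch of a consumer of the changed interface; ExampleTask and its body are made up for illustration, only the Runner enum and the new parameter come from this patch:

    class ExampleTask final : public ItemParallelJob::Task {
     public:
      explicit ExampleTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}
      void RunInParallel(Runner runner) override {
        // Runner::kForeground is set via WillRunOnForeground() for the task
        // that contributes on the main thread; everything else keeps the
        // default Runner::kBackground. Tasks can use this, e.g., to pick the
        // matching GC tracing scope before processing their items as before.
      }
    };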
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 03be1100b1..3cd6620083 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -156,7 +156,7 @@ void MarkingVerifier::VerifyMarking(PagedSpace* space) {
}
void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
- LargeObjectIterator it(lo_space);
+ LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
obj.Iterate(this);
@@ -456,6 +456,14 @@ void MarkCompactCollector::TearDown() {
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
+
+ if (FLAG_trace_evacuation_candidates) {
+ PrintIsolate(
+ isolate(),
+ "Evacuation candidate: Free bytes: %6zu. Free Lists length: %4d.\n",
+ p->area_size() - p->allocated_bytes(), p->FreeListsLength());
+ }
+
p->MarkEvacuationCandidate();
evacuation_candidates_.push_back(p);
}
@@ -473,6 +481,9 @@ bool MarkCompactCollector::StartCompaction() {
if (!compacting_) {
DCHECK(evacuation_candidates_.empty());
+ if (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())
+ return false;
+
CollectEvacuationCandidates(heap()->old_space());
if (FLAG_compact_code_space) {
@@ -513,7 +524,7 @@ void MarkCompactCollector::CollectGarbage() {
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
- ReadOnlyHeapIterator iterator(space);
+ ReadOnlyHeapObjectIterator iterator(space);
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
@@ -536,7 +547,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
}
void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
- LargeObjectIterator it(space);
+ LargeObjectSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
CHECK(non_atomic_marking_state()->IsWhite(obj));
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
@@ -567,6 +578,8 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
heap()->code_space()->RefillFreeList();
heap()->map_space()->RefillFreeList();
+ heap()->tracer()->NotifySweepingCompleted();
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
FullEvacuationVerifier verifier(heap());
@@ -629,6 +642,27 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int number_of_pages = space->CountTotalPages();
size_t area_size = space->AreaSize();
+ const bool in_standard_path =
+ !(FLAG_manual_evacuation_candidates_selection ||
+ FLAG_stress_compaction_random || FLAG_stress_compaction ||
+ FLAG_always_compact);
+ // These variables are only initialized if |in_standard_path| and are not
+ // used otherwise.
+ size_t max_evacuated_bytes;
+ int target_fragmentation_percent;
+ size_t free_bytes_threshold;
+ if (in_standard_path) {
+ // We use two conditions to decide whether a page qualifies as an evacuation
+ // candidate:
+ // * Target fragmentation: How fragmented is the page, i.e., what is the
+ // ratio between live bytes and the capacity (= area) of the page.
+ // * Evacuation quota: A global quota determining how many bytes should be
+ // compacted.
+ ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
+ &max_evacuated_bytes);
+ free_bytes_threshold = target_fragmentation_percent * (area_size / 100);
+ }
+
// Pairs of (live_bytes_in_page, page).
using LiveBytesPagePair = std::pair<size_t, Page*>;
std::vector<LiveBytesPagePair> pages;
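
In the standard path, free_bytes_threshold is simply target_fragmentation_percent percent of a page's usable area, and only pages whose free space reaches that threshold enter the candidate list at all. As a rough worked example with invented numbers (the real values come from ComputeEvacuationHeuristics and the page layout): with a 256 KB area and a 70 percent target, the threshold is 70 * (256 KB / 100), roughly 179 KB of free space. A small self-contained sketch of the filter:

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Illustrative numbers only; V8 derives these from the heap configuration.
  const size_t area_size = 256 * 1024;          // usable bytes per page
  const int target_fragmentation_percent = 70;  // hypothetical heuristic value
  const size_t free_bytes_threshold =
      target_fragmentation_percent * (area_size / 100);

  // (live_bytes, page id) pairs standing in for LiveBytesPagePair.
  const std::vector<std::pair<size_t, int>> pages = {
      {40 * 1024, 1}, {120 * 1024, 2}, {200 * 1024, 3}};

  for (const auto& p : pages) {
    const size_t free_bytes = area_size - p.first;
    const bool candidate = free_bytes >= free_bytes_threshold;
    std::printf("page %d: free=%zu threshold=%zu candidate=%d\n", p.second,
                free_bytes, free_bytes_threshold, static_cast<int>(candidate));
  }
}
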
@@ -652,7 +686,15 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
CHECK(p->SweepingDone());
DCHECK(p->area_size() == area_size);
- pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ if (in_standard_path) {
+ // Only pages with at least |free_bytes_threshold| free bytes are
+ // considered for evacuation.
+ if (area_size - p->allocated_bytes() >= free_bytes_threshold) {
+ pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ }
+ } else {
+ pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ }
}
int candidate_count = 0;
@@ -691,25 +733,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
} else {
// The following approach determines the pages that should be evacuated.
//
- // We use two conditions to decide whether a page qualifies as an evacuation
- // candidate, or not:
- // * Target fragmentation: How fragmented is a page, i.e., how is the ratio
- // between live bytes and capacity of this page (= area).
- // * Evacuation quota: A global quota determining how much bytes should be
- // compacted.
- //
- // The algorithm sorts all pages by live bytes and then iterates through
- // them starting with the page with the most free memory, adding them to the
- // set of evacuation candidates as long as both conditions (fragmentation
- // and quota) hold.
- size_t max_evacuated_bytes;
- int target_fragmentation_percent;
- ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
- &max_evacuated_bytes);
-
- const size_t free_bytes_threshold =
- target_fragmentation_percent * (area_size / 100);
-
// Sort pages from the most free to the least free, then select
// the first n pages for evacuation such that:
// - the total size of evacuated objects does not exceed the specified
@@ -722,10 +745,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
for (size_t i = 0; i < pages.size(); i++) {
size_t live_bytes = pages[i].first;
DCHECK_GE(area_size, live_bytes);
- size_t free_bytes = area_size - live_bytes;
if (FLAG_always_compact ||
- ((free_bytes >= free_bytes_threshold) &&
- ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
+ ((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
candidate_count++;
total_live_bytes += live_bytes;
}
@@ -735,9 +756,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
"fragmentation_limit_kb=%zu "
"fragmentation_limit_percent=%d sum_compaction_kb=%zu "
"compaction_limit_kb=%zu\n",
- space->name(), free_bytes / KB, free_bytes_threshold / KB,
- target_fragmentation_percent, total_live_bytes / KB,
- max_evacuated_bytes / KB);
+ space->name(), (area_size - live_bytes) / KB,
+ free_bytes_threshold / KB, target_fragmentation_percent,
+ total_live_bytes / KB, max_evacuated_bytes / KB);
}
}
// How many pages we will allocate for the evacuated objects
@@ -807,9 +828,9 @@ void MarkCompactCollector::Prepare() {
StartCompaction();
}
- PagedSpaces spaces(heap());
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(heap());
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->PrepareForMarkCompact();
}
heap()->account_external_memory_concurrently_freed();
@@ -1364,8 +1385,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
if (map.visitor_id() == kVisitThinString) {
HeapObject actual = ThinString::cast(object).unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
- object.map_slot().Relaxed_Store(
- MapWord::FromForwardingAddress(actual).ToMap());
+ object.set_map_word(MapWord::FromForwardingAddress(actual));
return true;
}
// TODO(mlippautz): Handle ConsString.
@@ -1463,7 +1483,7 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
inline bool Visit(HeapObject object, int size) override {
HeapObject target_object;
- if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
+ if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
DCHECK(object.map_word().IsForwardingAddress());
return true;
@@ -2084,7 +2104,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
UncompiledData::Initialize(
uncompiled_data, inferred_name, start_position, end_position,
- kFunctionLiteralIdInvalid,
[](HeapObject object, ObjectSlot slot, HeapObject target) {
RecordSlot(object, slot, target);
});
@@ -2731,6 +2750,7 @@ class Evacuator : public Malloced {
inline void Finalize();
virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
+ virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
protected:
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
@@ -2819,6 +2839,10 @@ class FullEvacuator : public Evacuator {
return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
}
+ GCTracer::Scope::ScopeId GetTracingScope() override {
+ return GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL;
+ }
+
inline void Finalize() {
Evacuator::Finalize();
@@ -2909,16 +2933,24 @@ class PageEvacuationTask : public ItemParallelJob::Task {
evacuator_(evacuator),
tracer_(isolate->heap()->tracer()) {}
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
+ void RunInParallel(Runner runner) override {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(tracer_, evacuator_->GetTracingScope());
+ ProcessItems();
+ } else {
+ TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
+ ProcessItems();
+ }
+ }
+
+ private:
+ void ProcessItems() {
EvacuationItem* item = nullptr;
while ((item = GetItem<EvacuationItem>()) != nullptr) {
evacuator_->EvacuatePage(item->chunk());
item->MarkFinished();
}
}
-
- private:
Evacuator* evacuator_;
GCTracer* tracer_;
};
@@ -3183,7 +3215,7 @@ void MarkCompactCollector::Evacuate() {
sweeper()->AddPageForIterability(p);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- DCHECK_EQ(OLD_SPACE, p->owner()->identity());
+ DCHECK_EQ(OLD_SPACE, p->owner_identity());
sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
}
}
@@ -3191,7 +3223,7 @@ void MarkCompactCollector::Evacuate() {
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
+ sweeper()->AddPage(p->owner_identity(), p, Sweeper::REGULAR);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
@@ -3218,24 +3250,35 @@ class UpdatingItem : public ItemParallelJob::Item {
class PointersUpdatingTask : public ItemParallelJob::Task {
public:
- explicit PointersUpdatingTask(Isolate* isolate,
- GCTracer::BackgroundScope::ScopeId scope)
+ explicit PointersUpdatingTask(
+ Isolate* isolate, GCTracer::Scope::ScopeId scope,
+ GCTracer::BackgroundScope::ScopeId background_scope)
: ItemParallelJob::Task(isolate),
tracer_(isolate->heap()->tracer()),
- scope_(scope) {}
+ scope_(scope),
+ background_scope_(background_scope) {}
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(tracer_, scope_);
+ void RunInParallel(Runner runner) override {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(tracer_, scope_);
+ UpdatePointers();
+ } else {
+ TRACE_BACKGROUND_GC(tracer_, background_scope_);
+ UpdatePointers();
+ }
+ }
+
+ private:
+ void UpdatePointers() {
UpdatingItem* item = nullptr;
while ((item = GetItem<UpdatingItem>()) != nullptr) {
item->Process();
item->MarkFinished();
}
}
-
- private:
GCTracer* tracer_;
- GCTracer::BackgroundScope::ScopeId scope_;
+ GCTracer::Scope::ScopeId scope_;
+ GCTracer::BackgroundScope::ScopeId background_scope_;
};
template <typename MarkingState>
@@ -3651,7 +3694,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_tasks + num_ephemeron_table_updating_tasks);
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(
- isolate(),
+ isolate(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.AddItem(new EphemeronTableUpdatingItem(heap()));
@@ -3684,7 +3727,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
if (num_tasks > 0) {
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(
- isolate(),
+ isolate(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run();
@@ -4194,8 +4237,9 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::BackgroundScope::
- MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
+ isolate(), GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
+ GCTracer::BackgroundScope::
+ MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
{
@@ -4498,9 +4542,30 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
Page::kPageSize);
}
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
+ void RunInParallel(Runner runner) override {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(collector_->heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
+ ProcessItems();
+ } else {
+ TRACE_BACKGROUND_GC(
+ collector_->heap()->tracer(),
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
+ ProcessItems();
+ }
+ }
+
+ void MarkObject(Object object) {
+ if (!Heap::InYoungGeneration(object)) return;
+ HeapObject heap_object = HeapObject::cast(object);
+ if (marking_state_->WhiteToGrey(heap_object)) {
+ const int size = visitor_.Visit(heap_object);
+ IncrementLiveBytes(heap_object, size);
+ }
+ }
+
+ private:
+ void ProcessItems() {
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
@@ -4519,17 +4584,6 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
static_cast<void*>(this), marking_time);
}
}
-
- void MarkObject(Object object) {
- if (!Heap::InYoungGeneration(object)) return;
- HeapObject heap_object = HeapObject::cast(object);
- if (marking_state_->WhiteToGrey(heap_object)) {
- const int size = visitor_.Visit(heap_object);
- IncrementLiveBytes(heap_object, size);
- }
- }
-
- private:
void EmptyLocalMarkingWorklist() {
HeapObject object;
while (marking_worklist_.Pop(&object)) {
@@ -4761,6 +4815,10 @@ class YoungGenerationEvacuator : public Evacuator {
return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
}
+ GCTracer::Scope::ScopeId GetTracingScope() override {
+ return GCTracer::Scope::MINOR_MC_EVACUATE_COPY_PARALLEL;
+ }
+
protected:
void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 033f4fc6e9..2a63896242 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -1079,7 +1079,7 @@ class ObjectStatsVisitor {
namespace {
void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
- CombinedHeapIterator iterator(heap);
+ CombinedHeapObjectIterator iterator(heap);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
visitor->Visit(obj, obj.Size());
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index d96cded09a..ba0bfa2415 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -12,6 +12,7 @@
#include "src/heap/mark-compact.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/module-inl.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
@@ -71,9 +72,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
template <typename ResultType, typename ConcreteVisitor>
void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
- HeapObject host, MapWordSlot map_slot) {
+ HeapObject host) {
DCHECK(!host.map_word().IsForwardingAddress());
- static_cast<ConcreteVisitor*>(this)->VisitPointer(host, ObjectSlot(map_slot));
+ static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot());
}
#define VISIT(TypeName, Type) \
@@ -88,8 +89,9 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
"concurrent marker"); \
} \
int size = TypeName::BodyDescriptor::SizeOf(map, object); \
- if (visitor->ShouldVisitMapPointer()) \
- visitor->VisitMapPointer(object, object.map_slot()); \
+ if (visitor->ShouldVisitMapPointer()) { \
+ visitor->VisitMapPointer(object); \
+ } \
TypeName::BodyDescriptor::IterateBody(map, object, size, visitor); \
return static_cast<ResultType>(size); \
}
@@ -109,7 +111,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map.instance_size();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
return static_cast<ResultType>(size);
}
@@ -120,8 +122,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object.map_slot());
+ if (visitor->ShouldVisitMapPointer()) {
+ visitor->VisitMapPointer(object);
+ }
JSObject::FastBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -132,8 +135,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::BodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object.map_slot());
+ if (visitor->ShouldVisitMapPointer()) {
+ visitor->VisitMapPointer(object);
+ }
JSObject::BodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -145,7 +149,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map.instance_size();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
StructBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
@@ -157,7 +161,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
return static_cast<ResultType>(object.size());
}
@@ -169,7 +173,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitWeakArray(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = WeakArrayBodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
WeakArrayBodyDescriptor::IterateBody(map, object, size, visitor);
return size;
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 9ebd94427e..a5c291458f 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -54,12 +54,15 @@ namespace internal {
V(SmallOrderedHashMap, SmallOrderedHashMap) \
V(SmallOrderedHashSet, SmallOrderedHashSet) \
V(SmallOrderedNameDictionary, SmallOrderedNameDictionary) \
+ V(SourceTextModule, SourceTextModule) \
V(Symbol, Symbol) \
+ V(SyntheticModule, SyntheticModule) \
V(ThinString, ThinString) \
V(TransitionArray, TransitionArray) \
V(UncompiledDataWithoutPreparseData, UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData, UncompiledDataWithPreparseData) \
V(WasmCapiFunctionData, WasmCapiFunctionData) \
+ V(WasmIndirectFunctionTable, WasmIndirectFunctionTable) \
V(WasmInstanceObject, WasmInstanceObject)
#define FORWARD_DECLARE(TypeName, Type) class Type;
@@ -91,7 +94,7 @@ class HeapVisitor : public ObjectVisitor {
// Guard predicate for visiting the objects map pointer separately.
V8_INLINE bool ShouldVisitMapPointer() { return true; }
// A callback for visiting the map pointer in the object header.
- V8_INLINE void VisitMapPointer(HeapObject host, MapWordSlot map_slot);
+ V8_INLINE void VisitMapPointer(HeapObject host);
// If this predicate returns false, then the heap visitor will fail
// in default Visit implementation for subclasses of JSObject.
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
diff --git a/deps/v8/src/heap/read-only-heap-inl.h b/deps/v8/src/heap/read-only-heap-inl.h
new file mode 100644
index 0000000000..c725b4bca8
--- /dev/null
+++ b/deps/v8/src/heap/read-only-heap-inl.h
@@ -0,0 +1,31 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_READ_ONLY_HEAP_INL_H_
+#define V8_HEAP_READ_ONLY_HEAP_INL_H_
+
+#include "src/heap/read-only-heap.h"
+
+#include "src/execution/isolate-utils-inl.h"
+#include "src/roots/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
+#ifdef V8_SHARED_RO_HEAP
+ // This fails if we are creating heap objects and the roots haven't yet been
+ // copied into the read-only heap or it has been cleared for testing.
+ if (shared_ro_heap_ != nullptr && shared_ro_heap_->init_complete_) {
+ return ReadOnlyRoots(shared_ro_heap_->read_only_roots_);
+ }
+#endif
+ return ReadOnlyRoots(GetHeapFromWritableObject(object));
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_READ_ONLY_HEAP_INL_H_
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index 1021bc147f..c325aea7e6 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -6,6 +6,7 @@
#include <cstring>
+#include "src/base/lsan.h"
#include "src/base/once.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -20,29 +21,53 @@ namespace internal {
#ifdef V8_SHARED_RO_HEAP
V8_DECLARE_ONCE(setup_ro_heap_once);
-ReadOnlyHeap* shared_ro_heap = nullptr;
+ReadOnlyHeap* ReadOnlyHeap::shared_ro_heap_ = nullptr;
#endif
// static
void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
DCHECK_NOT_NULL(isolate);
#ifdef V8_SHARED_RO_HEAP
- // Make sure we are only sharing read-only space when deserializing. Otherwise
- // we would be trying to create heap objects inside an already initialized
- // read-only space. Use ClearSharedHeapForTest if you need a new read-only
- // space.
- DCHECK_IMPLIES(shared_ro_heap != nullptr, des != nullptr);
-
- base::CallOnce(&setup_ro_heap_once, [isolate, des]() {
- shared_ro_heap = CreateAndAttachToIsolate(isolate);
- if (des != nullptr) shared_ro_heap->DeseralizeIntoIsolate(isolate, des);
- });
-
- isolate->heap()->SetUpFromReadOnlyHeap(shared_ro_heap);
+ bool call_once_ran = false;
+ base::Optional<Checksum> des_checksum;
+#ifdef DEBUG
+ if (des != nullptr) des_checksum = des->GetChecksum();
+#endif // DEBUG
+
+ base::CallOnce(&setup_ro_heap_once,
+ [isolate, des, des_checksum, &call_once_ran]() {
+ USE(des_checksum);
+ shared_ro_heap_ = CreateAndAttachToIsolate(isolate);
+ if (des != nullptr) {
+#ifdef DEBUG
+ shared_ro_heap_->read_only_blob_checksum_ = des_checksum;
+#endif // DEBUG
+ shared_ro_heap_->DeseralizeIntoIsolate(isolate, des);
+ }
+ call_once_ran = true;
+ });
+
+ USE(call_once_ran);
+ USE(des_checksum);
+#ifdef DEBUG
+ const base::Optional<Checksum> last_checksum =
+ shared_ro_heap_->read_only_blob_checksum_;
+ if (last_checksum || des_checksum) {
+ // The read-only heap was set up from a snapshot. Make sure it is always
+ // the same snapshot.
+ CHECK_EQ(last_checksum, des_checksum);
+ } else {
+ // The read-only heap objects were created. Make sure this happens only
+ // once, during this call.
+ CHECK(call_once_ran);
+ }
+#endif // DEBUG
+
+ isolate->SetUpFromReadOnlyHeap(shared_ro_heap_);
if (des != nullptr) {
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
- std::memcpy(isolate_ro_roots, shared_ro_heap->read_only_roots_,
+ std::memcpy(isolate_ro_roots, shared_ro_heap_->read_only_roots_,
kEntriesCount * sizeof(Address));
}
#else
@@ -66,7 +91,7 @@ void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) {
// static
ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(Isolate* isolate) {
auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
- isolate->heap()->SetUpFromReadOnlyHeap(ro_heap);
+ isolate->SetUpFromReadOnlyHeap(ro_heap);
return ro_heap;
}
@@ -77,6 +102,9 @@ void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
+ // N.B. Since pages are manually allocated with mmap, Lsan doesn't track
+ // their pointers. Seal explicitly ignores the necessary objects.
+ LSAN_IGNORE_OBJECT(this);
read_only_space_->Seal(ReadOnlySpace::SealMode::kDetachFromHeapAndForget);
#else
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
@@ -94,30 +122,17 @@ void ReadOnlyHeap::OnHeapTearDown() {
// static
void ReadOnlyHeap::ClearSharedHeapForTest() {
#ifdef V8_SHARED_RO_HEAP
- DCHECK_NOT_NULL(shared_ro_heap);
+ DCHECK_NOT_NULL(shared_ro_heap_);
// TODO(v8:7464): Just leak read-only space for now. The paged-space heap
// is null so there isn't a nice way to do this.
- delete shared_ro_heap;
- shared_ro_heap = nullptr;
+ shared_ro_heap_ = nullptr;
setup_ro_heap_once = 0;
#endif
}
// static
bool ReadOnlyHeap::Contains(HeapObject object) {
- return Page::FromAddress(object.ptr())->owner()->identity() == RO_SPACE;
-}
-
-// static
-ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
-#ifdef V8_SHARED_RO_HEAP
- // This fails if we are creating heap objects and the roots haven't yet been
- // copied into the read-only heap or it has been cleared for testing.
- if (shared_ro_heap != nullptr && shared_ro_heap->init_complete_) {
- return ReadOnlyRoots(shared_ro_heap->read_only_roots_);
- }
-#endif
- return ReadOnlyRoots(GetHeapFromWritableObject(object));
+ return MemoryChunk::FromHeapObject(object)->InReadOnlySpace();
}
Object* ReadOnlyHeap::ExtendReadOnlyObjectCache() {
@@ -134,15 +149,15 @@ bool ReadOnlyHeap::read_only_object_cache_is_initialized() const {
return read_only_object_cache_.size() > 0;
}
-ReadOnlyHeapIterator::ReadOnlyHeapIterator(ReadOnlyHeap* ro_heap)
- : ReadOnlyHeapIterator(ro_heap->read_only_space()) {}
+ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)
+ : ReadOnlyHeapObjectIterator(ro_heap->read_only_space()) {}
-ReadOnlyHeapIterator::ReadOnlyHeapIterator(ReadOnlySpace* ro_space)
+ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space)
: ro_space_(ro_space),
current_page_(ro_space->first_page()),
current_addr_(current_page_->area_start()) {}
-HeapObject ReadOnlyHeapIterator::Next() {
+HeapObject ReadOnlyHeapObjectIterator::Next() {
if (current_page_ == nullptr) {
return HeapObject();
}
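
The ReadOnlyHeap::SetUp() changes above make debug builds remember the checksum of the snapshot the shared read-only heap was deserialized from, and then check that every later isolate either supplies the same checksum or, when the heap was built from scratch, that this happened exactly once inside the call-once body. A rough standalone sketch of that initialize-once-then-verify pattern, using std::call_once and std::optional in place of V8's base helpers (names are illustrative, not V8's API):

#include <cassert>
#include <cstdint>
#include <mutex>
#include <optional>
#include <utility>

using Checksum = std::pair<uint32_t, uint32_t>;

struct SharedReadOnlyHeap {
  std::optional<Checksum> blob_checksum;  // checksum of the snapshot, if any
};

std::once_flag setup_once;
SharedReadOnlyHeap* shared_heap = nullptr;  // intentionally leaked here

// Set up the shared read-only heap from a snapshot checksum (or from nothing
// when heap objects are created from scratch), then verify later callers agree.
void SetUp(std::optional<Checksum> des_checksum) {
  bool call_once_ran = false;
  std::call_once(setup_once, [&] {
    shared_heap = new SharedReadOnlyHeap{des_checksum};
    call_once_ran = true;
  });

  if (shared_heap->blob_checksum || des_checksum) {
    // Deserialized from a snapshot: it must always be the same snapshot.
    assert(shared_heap->blob_checksum == des_checksum);
  } else {
    // Created from scratch: that may only happen once, in this very call.
    assert(call_once_ran);
  }
}

int main() {
  SetUp(Checksum{0x1234, 0x5678});  // first isolate deserializes the blob
  SetUp(Checksum{0x1234, 0x5678});  // later isolates must present a match
}
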
diff --git a/deps/v8/src/heap/read-only-heap.h b/deps/v8/src/heap/read-only-heap.h
index 697c9e26ef..4c1da62a15 100644
--- a/deps/v8/src/heap/read-only-heap.h
+++ b/deps/v8/src/heap/read-only-heap.h
@@ -5,7 +5,10 @@
#ifndef V8_HEAP_READ_ONLY_HEAP_H_
#define V8_HEAP_READ_ONLY_HEAP_H_
+#include <utility>
+
#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
#include "src/roots/roots.h"
@@ -44,7 +47,8 @@ class ReadOnlyHeap final {
// Gets read-only roots from an appropriate root list: shared read-only root
// list if the shared read-only heap has been initialized or the isolate
// specific roots table.
- V8_EXPORT_PRIVATE static ReadOnlyRoots GetReadOnlyRoots(HeapObject object);
+ V8_EXPORT_PRIVATE inline static ReadOnlyRoots GetReadOnlyRoots(
+ HeapObject object);
// Clears any shared read-only heap artifacts for testing, forcing read-only
// heap to be re-created on next set up.
@@ -60,6 +64,8 @@ class ReadOnlyHeap final {
ReadOnlySpace* read_only_space() const { return read_only_space_; }
private:
+ using Checksum = std::pair<uint32_t, uint32_t>;
+
// Creates a new read-only heap and attaches it to the provided isolate.
static ReadOnlyHeap* CreateAndAttachToIsolate(Isolate* isolate);
// Runs the read-only deserializer and calls InitFromIsolate to complete
@@ -76,18 +82,25 @@ class ReadOnlyHeap final {
std::vector<Object> read_only_object_cache_;
#ifdef V8_SHARED_RO_HEAP
+#ifdef DEBUG
+ // The checksum of the blob the read-only heap was deserialized from, if any.
+ base::Optional<Checksum> read_only_blob_checksum_;
+#endif // DEBUG
+
Address read_only_roots_[kEntriesCount];
-#endif
+
+ V8_EXPORT_PRIVATE static ReadOnlyHeap* shared_ro_heap_;
+#endif // V8_SHARED_RO_HEAP
explicit ReadOnlyHeap(ReadOnlySpace* ro_space) : read_only_space_(ro_space) {}
DISALLOW_COPY_AND_ASSIGN(ReadOnlyHeap);
};
// This class enables iterating over all read-only heap objects.
-class V8_EXPORT_PRIVATE ReadOnlyHeapIterator {
+class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {
public:
- explicit ReadOnlyHeapIterator(ReadOnlyHeap* ro_heap);
- explicit ReadOnlyHeapIterator(ReadOnlySpace* ro_space);
+ explicit ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap);
+ explicit ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space);
HeapObject Next();
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index cd2344b349..ea7fe0149b 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -5,8 +5,8 @@
#ifndef V8_HEAP_REMEMBERED_SET_H_
#define V8_HEAP_REMEMBERED_SET_H_
+#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
-#include "src/common/v8memory.h"
#include "src/heap/heap.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
@@ -309,7 +309,7 @@ class UpdateTypedSlotHelper {
SlotCallbackResult result = callback(FullMaybeObjectSlot(&code));
DCHECK(!HasWeakHeapObjectTag(code));
if (code != old_code) {
- Memory<Address>(entry_address) = code.entry();
+ base::Memory<Address>(entry_address) = code.entry();
}
return result;
}
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 50dc5f25c9..9c605f7089 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -97,8 +97,7 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
// with page initialization.
HeapObject heap_object;
if (object->GetHeapObject(&heap_object)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object.address());
- CHECK_NOT_NULL(chunk->synchronized_heap());
+ MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
}
#endif
}
@@ -110,9 +109,8 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
heap()->CopyBlock(target.address() + kTaggedSize,
source.address() + kTaggedSize, size - kTaggedSize);
- Object old = source.map_slot().Release_CompareAndSwap(
- map, MapWord::FromForwardingAddress(target).ToMap());
- if (old != map) {
+ if (!source.synchronized_compare_and_swap_map_word(
+ MapWord::FromMap(map), MapWord::FromForwardingAddress(target))) {
// Other task migrated the object.
return false;
}
@@ -215,9 +213,9 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
FLAG_young_generation_large_objects &&
MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
- MemoryChunk::FromHeapObject(object)->owner()->identity());
- if (object.map_slot().Release_CompareAndSwap(
- map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
+ MemoryChunk::FromHeapObject(object)->owner_identity());
+ if (object.synchronized_compare_and_swap_map_word(
+ MapWord::FromMap(map), MapWord::FromForwardingAddress(object))) {
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
if (object_fields == ObjectFields::kMaybePointers) {
@@ -314,8 +312,7 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObjectReference::Update(slot, first);
if (!Heap::InYoungGeneration(first)) {
- object.map_slot().Release_Store(
- MapWord::FromForwardingAddress(first).ToMap());
+ object.synchronized_set_map_word(MapWord::FromForwardingAddress(first));
return REMOVE_SLOT;
}
@@ -324,16 +321,15 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObject target = first_word.ToForwardingAddress();
HeapObjectReference::Update(slot, target);
- object.map_slot().Release_Store(
- MapWord::FromForwardingAddress(target).ToMap());
+ object.synchronized_set_map_word(MapWord::FromForwardingAddress(target));
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map map = first_word.ToMap();
SlotCallbackResult result =
EvacuateObjectDefault(map, slot, first, first.SizeFromMap(map),
Map::ObjectFieldsFrom(map.visitor_id()));
- object.map_slot().Release_Store(
- MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
+ object.synchronized_set_map_word(
+ MapWord::FromForwardingAddress(slot.ToHeapObject()));
return result;
}
DCHECK_EQ(ObjectFields::kMaybePointers,
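
The scavenger hunks above replace direct Release_CompareAndSwap/Release_Store operations on the map slot with synchronized_compare_and_swap_map_word and synchronized_set_map_word helpers. The underlying protocol stays the same: racing tasks try to install a forwarding address into the object's header, and only the task whose compare-and-swap succeeds owns the migration. A simplified sketch of that protocol with std::atomic (the MapWord values and the object copy itself are placeholders, not V8's representation):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

using MapWord = uintptr_t;

constexpr MapWord kOriginalMap = 0x1000;  // pretend "map" value
constexpr MapWord kForwardingA = 0x2001;  // forwarding address of copy A
constexpr MapWord kForwardingB = 0x2002;  // forwarding address of copy B

std::atomic<MapWord> map_word{kOriginalMap};

// Returns true only for the thread that actually migrated the object.
bool TryMigrate(MapWord forwarding) {
  MapWord expected = kOriginalMap;
  return map_word.compare_exchange_strong(expected, forwarding,
                                          std::memory_order_release,
                                          std::memory_order_relaxed);
}

int main() {
  bool a_won = false, b_won = false;
  std::thread a([&] { a_won = TryMigrate(kForwardingA); });
  std::thread b([&] { b_won = TryMigrate(kForwardingB); });
  a.join();
  b.join();
  // Exactly one task wins; the other observes the forwarding word and bails.
  std::printf("A won: %d, B won: %d, header: %#zx\n", a_won, b_won,
              static_cast<size_t>(map_word.load()));
}
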
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index c7666b7da7..70b514142f 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -41,10 +41,20 @@ class ScavengingTask final : public ItemParallelJob::Task {
scavenger_(scavenger),
barrier_(barrier) {}
- void RunInParallel() final {
- TRACE_BACKGROUND_GC(
- heap_->tracer(),
- GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+ void RunInParallel(Runner runner) final {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
+ ProcessItems();
+ } else {
+ TRACE_BACKGROUND_GC(
+ heap_->tracer(),
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+ ProcessItems();
+ }
+ }
+
+ private:
+ void ProcessItems() {
double scavenging_time = 0.0;
{
barrier_->Start();
@@ -66,8 +76,6 @@ class ScavengingTask final : public ItemParallelJob::Task {
scavenger_->bytes_copied(), scavenger_->bytes_promoted());
}
}
-
- private:
Heap* const heap_;
Scavenger* const scavenger_;
OneshotBarrier* const barrier_;
@@ -413,7 +421,7 @@ void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
}
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
- AllocationSpace space = page->owner()->identity();
+ AllocationSpace space = page->owner_identity();
if ((space == OLD_SPACE) && !page->SweepingDone()) {
heap()->mark_compact_collector()->sweeper()->AddPage(
space, reinterpret_cast<Page*>(page),
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 458fd819ae..a936521a7e 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -29,7 +29,6 @@
#include "src/objects/lookup-cache.h"
#include "src/objects/map.h"
#include "src/objects/microtask.h"
-#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table.h"
@@ -37,11 +36,15 @@
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
+#include "src/objects/source-text-module.h"
#include "src/objects/stack-frame-info.h"
#include "src/objects/string.h"
+#include "src/objects/synthetic-module.h"
#include "src/objects/template-objects-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/wasm/wasm-objects.h"
+#include "torque-generated/class-definitions-tq.h"
+#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
@@ -485,7 +488,10 @@ bool Heap::CreateInitialMaps() {
uncompiled_data_with_preparse_data)
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
-
+ ALLOCATE_MAP(SOURCE_TEXT_MODULE_TYPE, SourceTextModule::kSize,
+ source_text_module)
+ ALLOCATE_MAP(SYNTHETIC_MODULE_TYPE, SyntheticModule::kSize,
+ synthetic_module)
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
@@ -870,10 +876,6 @@ void Heap::CreateInitialObjects() {
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_regexp_species_protector(*cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_string_iterator_protector(*cell);
Handle<Cell> string_length_overflow_cell = factory->NewCell(
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 308d4f51b1..3b4ed8d30a 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -42,9 +42,9 @@ PageRange::PageRange(Address start, Address limit)
}
// -----------------------------------------------------------------------------
-// SemiSpaceIterator
+// SemiSpaceObjectIterator
-HeapObject SemiSpaceIterator::Next() {
+HeapObject SemiSpaceObjectIterator::Next() {
while (current_ != limit_) {
if (Page::IsAlignedToPageSize(current_)) {
Page* page = Page::FromAllocationAreaAddress(current_);
@@ -63,9 +63,9 @@ HeapObject SemiSpaceIterator::Next() {
}
// -----------------------------------------------------------------------------
-// HeapObjectIterator
+// PagedSpaceObjectIterator
-HeapObject HeapObjectIterator::Next() {
+HeapObject PagedSpaceObjectIterator::Next() {
do {
HeapObject next_obj = FromCurrentPage();
if (!next_obj.is_null()) return next_obj;
@@ -73,7 +73,7 @@ HeapObject HeapObjectIterator::Next() {
return HeapObject();
}
-HeapObject HeapObjectIterator::FromCurrentPage() {
+HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
cur_addr_ = space_->limit();
@@ -182,7 +182,7 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
- category->set_free_list(&free_list_);
+ category->set_free_list(free_list());
added += category->available();
category->Relink();
});
@@ -204,13 +204,6 @@ bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
return false;
}
-bool MemoryChunk::HasHeaderSentinel(Address slot_addr) {
- Address base = BaseAddress(slot_addr);
- if (slot_addr < base + kHeaderSize) return false;
- return HeapObject::FromAddress(base) ==
- ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
-}
-
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
while (!HasHeaderSentinel(addr)) {
addr = BaseAddress(addr) - 1;
@@ -234,14 +227,21 @@ void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
MemoryChunk* from,
MemoryChunk* to,
size_t amount) {
+ DCHECK_NOT_NULL(from->owner());
+ DCHECK_NOT_NULL(to->owner());
base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
amount);
}
+AllocationSpace MemoryChunk::owner_identity() const {
+ if (InReadOnlySpace()) return RO_SPACE;
+ return owner()->identity();
+}
+
void Page::MarkNeverAllocateForTesting() {
- DCHECK(this->owner()->identity() != NEW_SPACE);
+ DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
SetFlag(NEVER_ALLOCATE_ON_PAGE);
SetFlag(NEVER_EVACUATE);
@@ -315,10 +315,6 @@ MemoryChunk* OldGenerationMemoryChunkIterator::next() {
UNREACHABLE();
}
-Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
- return top(type) ? top(type)->page() : nullptr;
-}
-
FreeList* FreeListCategory::owner() { return free_list_; }
bool FreeListCategory::is_linked() {
@@ -376,7 +372,7 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
- DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
+ DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
if (!EnsureLinearAllocationArea(size_in_bytes)) {
return AllocationResult::Retry(identity());
}
@@ -389,7 +385,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
- DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
+ DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
int allocation_size = size_in_bytes;
HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object.is_null()) {
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 2c8cbdfc32..438308a346 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -8,6 +8,7 @@
#include <utility>
#include "src/base/bits.h"
+#include "src/base/lsan.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
@@ -44,9 +45,9 @@ STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
// ----------------------------------------------------------------------------
-// HeapObjectIterator
+// PagedSpaceObjectIterator
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(PagedSpace* space)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
@@ -57,28 +58,28 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
#endif
}
-HeapObjectIterator::HeapObjectIterator(Page* page)
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(Page* page)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(reinterpret_cast<PagedSpace*>(page->owner())),
page_range_(page),
current_page_(page_range_.begin()) {
-#ifdef DEBUG
- Space* owner = page->owner();
+#ifdef V8_SHARED_RO_HEAP
// TODO(v8:7464): Always enforce this once PagedSpace::Verify is no longer
// used to verify read-only space for non-shared builds.
-#ifdef V8_SHARED_RO_HEAP
- DCHECK_NE(owner->identity(), RO_SPACE);
-#endif
- // Do not access the heap of the read-only space.
- DCHECK(owner->identity() == RO_SPACE || owner->identity() == OLD_SPACE ||
- owner->identity() == MAP_SPACE || owner->identity() == CODE_SPACE);
+ DCHECK(!page->InReadOnlySpace());
+#endif // V8_SHARED_RO_HEAP
+
+#ifdef DEBUG
+ AllocationSpace owner = page->owner_identity();
+ DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
+ owner == CODE_SPACE);
#endif // DEBUG
}
// We have hit the end of the page and should advance to the next block of
// objects. This happens at the end of the page.
-bool HeapObjectIterator::AdvanceToNextPage() {
+bool PagedSpaceObjectIterator::AdvanceToNextPage() {
DCHECK_EQ(cur_addr_, cur_end_);
if (current_page_ == page_range_.end()) return false;
Page* cur_page = *(current_page_++);
@@ -105,14 +106,14 @@ PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
- for (SpaceIterator it(heap_); it.has_next();) {
- it.next()->PauseAllocationObservers();
+ for (SpaceIterator it(heap_); it.HasNext();) {
+ it.Next()->PauseAllocationObservers();
}
}
PauseAllocationObserversScope::~PauseAllocationObserversScope() {
- for (SpaceIterator it(heap_); it.has_next();) {
- it.next()->ResumeAllocationObservers();
+ for (SpaceIterator it(heap_); it.HasNext();) {
+ it.Next()->ResumeAllocationObservers();
}
}
@@ -539,10 +540,13 @@ size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
return AllocatableMemoryInDataPage();
}
-Heap* MemoryChunk::synchronized_heap() {
- return reinterpret_cast<Heap*>(
- base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
+#ifdef THREAD_SANITIZER
+void MemoryChunk::SynchronizedHeapLoad() {
+ CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
+ reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
+ InReadOnlySpace());
}
+#endif
void MemoryChunk::InitializationMemoryFence() {
base::SeqCst_MemoryFence();
@@ -561,8 +565,7 @@ void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
DCHECK(permission == PageAllocator::kRead ||
permission == PageAllocator::kReadExecute);
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner()->identity() == CODE_SPACE ||
- owner()->identity() == CODE_LO_SPACE);
+ DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Decrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
@@ -596,8 +599,7 @@ void MemoryChunk::SetReadAndExecutable() {
void MemoryChunk::SetReadAndWritable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner()->identity() == CODE_SPACE ||
- owner()->identity() == CODE_LO_SPACE);
+ DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
@@ -688,16 +690,11 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Executability executable, Space* owner,
VirtualMemory reservation) {
MemoryChunk* chunk = FromAddress(base);
-
DCHECK_EQ(base, chunk->address());
+ new (chunk) BasicMemoryChunk(size, area_start, area_end);
+ DCHECK(HasHeaderSentinel(area_start));
chunk->heap_ = heap;
- chunk->size_ = size;
- chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
- DCHECK(HasHeaderSentinel(area_start));
- chunk->area_start_ = area_start;
- chunk->area_end_ = area_end;
- chunk->flags_ = Flags(NO_FLAGS);
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
@@ -716,7 +713,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->young_generation_bitmap_ = nullptr;
- chunk->marking_bitmap_ = nullptr;
chunk->local_tracker_ = nullptr;
chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
@@ -724,25 +720,18 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->external_backing_store_bytes_
[ExternalBackingStoreType::kExternalString] = 0;
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- chunk->categories_[i] = nullptr;
- }
+ chunk->categories_ = nullptr;
- chunk->AllocateMarkingBitmap();
+ heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
+ 0);
if (owner->identity() == RO_SPACE) {
heap->incremental_marking()
->non_atomic_marking_state()
->bitmap(chunk)
->MarkAllBits();
- } else {
- heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
- 0);
+ chunk->SetFlag(READ_ONLY_HEAP);
}
- DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
- DCHECK_EQ(kHeapOffset, OFFSET_OF(MemoryChunk, heap_));
- DCHECK_EQ(kOwnerOffset, OFFSET_OF(MemoryChunk, owner_));
-
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
if (heap->write_protect_code_memory()) {
@@ -768,11 +757,11 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
return chunk;
}
-Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
+Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
Page* page = static_cast<Page*>(chunk);
- DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- page->owner()->identity()),
- page->area_size());
+ DCHECK_EQ(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
+ page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocationStatistics();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
@@ -783,8 +772,7 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
return page;
}
-Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
- DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
+Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
bool in_to_space = (id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
Page* page = static_cast<Page*>(chunk);
@@ -829,24 +817,31 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
}
void Page::AllocateFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ DCHECK_NULL(categories_);
+ categories_ = new FreeListCategory*[free_list()->number_of_categories()]();
+ for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
+ DCHECK_NULL(categories_[i]);
categories_[i] = new FreeListCategory(
reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
}
}
void Page::InitializeFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
}
}
void Page::ReleaseFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- if (categories_[i] != nullptr) {
- delete categories_[i];
- categories_[i] = nullptr;
+ if (categories_ != nullptr) {
+ for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
+ if (categories_[i] != nullptr) {
+ delete categories_[i];
+ categories_[i] = nullptr;
+ }
}
+ delete[] categories_;
+ categories_ = nullptr;
}
}
@@ -856,23 +851,21 @@ Page* Page::ConvertNewToOld(Page* old_page) {
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
old_page->SetFlags(0, static_cast<uintptr_t>(~0));
- Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
+ Page* new_page = old_space->InitializePage(old_page);
old_space->AddPage(new_page);
return new_page;
}
size_t MemoryChunk::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
+ if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
return size();
return high_water_mark_;
}
-bool MemoryChunk::InOldSpace() const {
- return owner()->identity() == OLD_SPACE;
-}
+bool MemoryChunk::InOldSpace() const { return owner_identity() == OLD_SPACE; }
bool MemoryChunk::InLargeObjectSpace() const {
- return owner()->identity() == LO_SPACE;
+ return owner_identity() == LO_SPACE;
}
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
@@ -1131,15 +1124,15 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
Address new_area_end) {
VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
- chunk->size_ -= bytes_to_free;
- chunk->area_end_ = new_area_end;
+ chunk->set_size(chunk->size() - bytes_to_free);
+ chunk->set_area_end(new_area_end);
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
// Add guard page at the end.
size_t page_size = GetCommitPageSize();
- DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
+ DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
- reservation->SetPermissions(chunk->area_end_, page_size,
+ reservation->SetPermissions(chunk->area_end(), page_size,
PageAllocator::kNoAccess);
}
// On e.g. Windows, a reservation may be larger than a page and releasing
@@ -1181,7 +1174,7 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
- chunk->ReleaseAllocatedMemory();
+ chunk->ReleaseAllAllocatedMemory();
VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
@@ -1191,7 +1184,7 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
reservation->Free();
} else {
// Only read-only pages can have non-initialized reservation object.
- DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
+ DCHECK_EQ(RO_SPACE, chunk->owner_identity());
FreeMemory(page_allocator(chunk->executable()), chunk->address(),
chunk->size());
}
@@ -1251,7 +1244,7 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
chunk = AllocateChunk(size, size, executable, owner);
}
if (chunk == nullptr) return nullptr;
- return owner->InitializePage(chunk, executable);
+ return owner->InitializePage(chunk);
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
@@ -1368,7 +1361,7 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
-void MemoryChunk::ReleaseAllocatedMemory() {
+void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
if (mutex_ != nullptr) {
delete mutex_;
mutex_ = nullptr;
@@ -1377,20 +1370,29 @@ void MemoryChunk::ReleaseAllocatedMemory() {
delete page_protection_change_mutex_;
page_protection_change_mutex_ = nullptr;
}
+ if (code_object_registry_ != nullptr) {
+ delete code_object_registry_;
+ code_object_registry_ = nullptr;
+ }
+
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
ReleaseInvalidatedSlots();
+
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
- if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
- if (code_object_registry_ != nullptr) delete code_object_registry_;
+}
+void MemoryChunk::ReleaseAllAllocatedMemory() {
if (!IsLargePage()) {
Page* page = static_cast<Page*>(this);
page->ReleaseFreeListCategories();
}
+
+ ReleaseAllocatedMemoryNeededForWritableChunk();
+ if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
}
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
@@ -1408,7 +1410,7 @@ template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
- SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
+ SlotSet* slot_set = AllocateAndInitializeSlotSet(size(), address());
SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
&slot_set_[type], nullptr, slot_set);
if (old_slot_set != nullptr) {
@@ -1527,23 +1529,10 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
young_generation_bitmap_ = nullptr;
}
-void MemoryChunk::AllocateMarkingBitmap() {
- DCHECK_NULL(marking_bitmap_);
- marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
-}
-
-void MemoryChunk::ReleaseMarkingBitmap() {
- DCHECK_NOT_NULL(marking_bitmap_);
- free(marking_bitmap_);
- marking_bitmap_ = nullptr;
-}
-
// -----------------------------------------------------------------------------
// PagedSpace implementation
void Space::CheckOffsetsAreConsistent() const {
- static_assert(Space::kIdOffset == heap_internals::Space::kIdOffset,
- "ID offset inconsistent");
DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
}
@@ -1592,8 +1581,8 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
- Executability executable)
- : SpaceWithLinearArea(heap, space), executable_(executable) {
+ Executability executable, FreeList* free_list)
+ : SpaceWithLinearArea(heap, space, free_list), executable_(executable) {
area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
accounting_stats_.Clear();
}
@@ -1614,6 +1603,7 @@ void PagedSpace::RefillFreeList() {
identity() != MAP_SPACE && identity() != RO_SPACE) {
return;
}
+ DCHECK(!IsDetached());
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
{
@@ -1713,21 +1703,7 @@ void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
base::MutexGuard guard(mutex());
- // Check for pages that still contain free list entries. Bail out for smaller
- // categories.
- const int minimum_category =
- static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
- Page* page = free_list()->GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kMedium);
- if (!page && static_cast<int>(kSmall) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kSmall);
- if (!page && static_cast<int>(kTiny) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kTiny);
- if (!page && static_cast<int>(kTiniest) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kTiniest);
+ Page* page = free_list()->GetPageForSize(size_in_bytes);
if (!page) return nullptr;
RemovePage(page);
return page;
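
The removed block walked the free-list category types from kHuge down to the smallest category that could still satisfy |size_in_bytes|; that walk now lives behind FreeList::GetPageForSize(). A rough sketch of the selection idea, with invented category boundaries and page handles standing in for the real free-list structures:

#include <cstddef>
#include <cstdio>

enum CategoryType { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge, kCount };

// Hypothetical minimum block sizes per category, smallest to largest.
constexpr size_t kMinSize[kCount] = {16, 32, 256, 2048, 16384, 65536};

// Pretend "top page" per category; 0 means the category is empty.
int top_page[kCount] = {0, 7, 0, 3, 0, 0};

// Smallest category whose blocks can hold a request of |size_in_bytes|.
int SelectCategory(size_t size_in_bytes) {
  int type = kTiniest;
  while (type + 1 < kCount && size_in_bytes >= kMinSize[type + 1]) type++;
  return type;
}

// Walk from the largest category down to the smallest one that can still
// satisfy the request, returning the first non-empty category's page.
int GetPageForSize(size_t size_in_bytes) {
  const int minimum_category = SelectCategory(size_in_bytes);
  for (int type = kHuge; type >= minimum_category; type--) {
    if (top_page[type] != 0) return top_page[type];
  }
  return 0;  // no page has a large-enough free block
}

int main() {
  std::printf("page for 1000 bytes: %d\n", GetPageForSize(1000));    // page 3
  std::printf("page for 30000 bytes: %d\n", GetPageForSize(30000));  // none
}
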
@@ -1769,9 +1745,9 @@ size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
void PagedSpace::ResetFreeList() {
for (Page* page : *this) {
- free_list_.EvictFreeListItems(page);
+ free_list_->EvictFreeListItems(page);
}
- DCHECK(free_list_.IsEmpty());
+ DCHECK(free_list_->IsEmpty());
}
void PagedSpace::ShrinkImmortalImmovablePages() {
@@ -1934,8 +1910,8 @@ void PagedSpace::ReleasePage(Page* page) {
page));
DCHECK_EQ(page->owner(), this);
- free_list_.EvictFreeListItems(page);
- DCHECK(!free_list_.ContainsPageFreeListItems(page));
+ free_list_->EvictFreeListItems(page);
+ DCHECK(!free_list_->ContainsPageFreeListItems(page));
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
DCHECK(!top_on_previous_step_);
@@ -1972,7 +1948,7 @@ void PagedSpace::SetReadAndWritable() {
}
std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
- return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
+ return std::unique_ptr<ObjectIterator>(new PagedSpaceObjectIterator(this));
}
bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
@@ -1998,7 +1974,7 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
}
size_t new_node_size = 0;
- FreeSpace new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
+ FreeSpace new_node = free_list_->Allocate(size_in_bytes, &new_node_size);
if (new_node.is_null()) return false;
DCHECK_GE(new_node_size, size_in_bytes);
@@ -2055,7 +2031,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
allocation_pointer_found_in_space = true;
}
CHECK(page->SweepingDone());
- HeapObjectIterator it(page);
+ PagedSpaceObjectIterator it(page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
@@ -2066,8 +2042,8 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
// be in map space.
Map map = object.map();
CHECK(map.IsMap());
- CHECK(isolate->heap()->map_space()->Contains(map) ||
- ReadOnlyHeap::Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) ||
+ isolate->heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -2118,7 +2094,7 @@ void PagedSpace::VerifyLiveBytes() {
heap()->incremental_marking()->marking_state();
for (Page* page : *this) {
CHECK(page->SweepingDone());
- HeapObjectIterator it(page);
+ PagedSpaceObjectIterator it(page);
int black_size = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
@@ -2138,7 +2114,7 @@ void PagedSpace::VerifyCountersAfterSweeping() {
for (Page* page : *this) {
DCHECK(page->SweepingDone());
total_capacity += page->area_size();
- HeapObjectIterator it(page);
+ PagedSpaceObjectIterator it(page);
size_t real_allocated = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
if (!object.IsFiller()) {
@@ -2185,7 +2161,7 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
size_t initial_semispace_capacity,
size_t max_semispace_capacity)
- : SpaceWithLinearArea(heap, NEW_SPACE),
+ : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace) {
DCHECK(initial_semispace_capacity <= max_semispace_capacity);
@@ -2528,11 +2504,11 @@ void SpaceWithLinearArea::InlineAllocationStep(Address top,
}
std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
- return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
+ return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}
#ifdef VERIFY_HEAP
-// We do not use the SemiSpaceIterator because verification doesn't assume
+// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify(Isolate* isolate) {
// The allocation pointer should be in the space or at the very end.
@@ -2560,8 +2536,7 @@ void NewSpace::Verify(Isolate* isolate) {
// be in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
- CHECK(heap()->map_space()->Contains(map) ||
- heap()->read_only_space()->Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// The object should not be code or a map.
CHECK(!object.IsMap());
@@ -2633,6 +2608,9 @@ bool SemiSpace::Commit() {
DCHECK(!is_committed());
const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
+ // Pages in the new spaces can be moved to the old space by the full
+ // collector. Therefore, they must be initialized with the same FreeList as
+ // old pages.
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
@@ -2890,16 +2868,14 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
}
#endif
-
// -----------------------------------------------------------------------------
-// SemiSpaceIterator implementation.
+// SemiSpaceObjectIterator implementation.
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
+SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
Initialize(space->first_allocatable_address(), space->top());
}
-
-void SemiSpaceIterator::Initialize(Address start, Address end) {
+void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
SemiSpace::AssertValidRange(start, end);
current_ = start;
limit_ = end;
@@ -2925,19 +2901,22 @@ void FreeListCategory::Reset() {
set_prev(nullptr);
set_next(nullptr);
available_ = 0;
+ length_ = 0;
}
FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace node = top();
- if (node.is_null() || static_cast<size_t>(node.Size()) < minimum_size) {
+ DCHECK(!node.is_null());
+ if (static_cast<size_t>(node.Size()) < minimum_size) {
*node_size = 0;
return FreeSpace();
}
set_top(node.next());
*node_size = node.Size();
available_ -= *node_size;
+ length_--;
return node;
}
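
The new length_ counter is kept in sync by PickNodeFromList, SearchForNodeInList and Free, so FreeListCategory::FreeListLength() (see spaces.h below) becomes a constant-time read instead of the debug-only list walk it replaces. A minimal standalone sketch of that bookkeeping, using a plain linked block type rather than V8's FreeSpace:

#include <cassert>
#include <cstddef>

struct Block {
  size_t size;
  Block* next;
};

// Simplified free-list category: a singly linked list of blocks plus cached
// byte and block counters, mirroring available_ and length_.
class CategorySketch {
 public:
  void Free(Block* block) {  // analogous to FreeListCategory::Free
    block->next = top_;
    top_ = block;
    available_ += block->size;
    length_++;
  }

  Block* Pick(size_t minimum_size) {  // analogous to PickNodeFromList
    if (top_ == nullptr || top_->size < minimum_size) return nullptr;
    Block* node = top_;
    top_ = node->next;
    available_ -= node->size;
    length_--;  // the counter always matches the list length
    return node;
  }

  int length() const { return length_; }  // O(1), no list walk needed
  size_t available() const { return available_; }

 private:
  Block* top_ = nullptr;
  size_t available_ = 0;
  int length_ = 0;
};

int main() {
  Block a{32, nullptr}, b{64, nullptr};
  CategorySketch category;
  category.Free(&a);
  category.Free(&b);
  assert(category.length() == 2 && category.available() == 96);
  category.Pick(48);  // removes the 64-byte block from the top
  assert(category.length() == 1 && category.available() == 32);
}
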
@@ -2951,12 +2930,13 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
if (size >= minimum_size) {
DCHECK_GE(available_, size);
available_ -= size;
+ length_--;
if (cur_node == top()) {
set_top(cur_node.next());
}
if (!prev_non_evac_node.is_null()) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
- if (chunk->owner()->identity() == CODE_SPACE) {
+ if (chunk->owner_identity() == CODE_SPACE) {
chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
}
prev_non_evac_node.set_next(cur_node.next());
@@ -2976,6 +2956,7 @@ void FreeListCategory::Free(Address start, size_t size_in_bytes,
free_space.set_next(top());
set_top(free_space);
available_ += size_in_bytes;
+ length_++;
if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
owner()->AddCategory(this);
}
@@ -2983,17 +2964,14 @@ void FreeListCategory::Free(Address start, size_t size_in_bytes,
void FreeListCategory::RepairFreeList(Heap* heap) {
+ Map free_space_map = ReadOnlyRoots(heap).free_space_map();
FreeSpace n = top();
while (!n.is_null()) {
- MapWordSlot map_location = n.map_slot();
- // We can't use .is_null() here because *map_location returns an
- // Object (for which "is null" is not defined, as it would be
- // indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
- if (map_location.contains_value(kNullAddress)) {
- map_location.store(ReadOnlyRoots(heap).free_space_map());
+ ObjectSlot map_slot = n.map_slot();
+ if (map_slot.contains_value(kNullAddress)) {
+ map_slot.store(free_space_map);
} else {
- DCHECK(map_location.contains_value(
- ReadOnlyRoots(heap).free_space_map().ptr()));
+ DCHECK(map_slot.contains_value(free_space_map.ptr()));
}
n = n.next();
}
@@ -3004,21 +2982,50 @@ void FreeListCategory::Relink() {
owner()->AddCategory(this);
}
-FreeList::FreeList() : wasted_bytes_(0) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- categories_[i] = nullptr;
+// ------------------------------------------------
+// Generic FreeList methods (alloc/free related)
+
+FreeList* FreeList::CreateFreeList() {
+ if (FLAG_gc_freelist_strategy == 1) {
+ return new FreeListFastAlloc();
+ } else if (FLAG_gc_freelist_strategy == 2) {
+ return new FreeListMany();
+ } else {
+ return new FreeListLegacy();
}
- Reset();
}
+FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
+ size_t minimum_size, size_t* node_size) {
+ FreeListCategory* category = categories_[type];
+ if (category == nullptr) return FreeSpace();
+ FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
+ if (!node.is_null()) {
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ }
+ if (category->is_empty()) {
+ RemoveCategory(category);
+ }
+ return node;
+}
-void FreeList::Reset() {
- ForAllFreeListCategories(
- [](FreeListCategory* category) { category->Reset(); });
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- categories_[i] = nullptr;
+FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
+ size_t minimum_size,
+ size_t* node_size) {
+ FreeListCategoryIterator it(this, type);
+ FreeSpace node;
+ while (it.HasNext()) {
+ FreeListCategory* current = it.Next();
+ node = current->SearchForNodeInList(minimum_size, node_size);
+ if (!node.is_null()) {
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ if (current->is_empty()) {
+ RemoveCategory(current);
+ }
+ return node;
+ }
}
- wasted_bytes_ = 0;
+ return node;
}
size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
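
CreateFreeList() is now the single place where the free-list strategy is chosen: FLAG_gc_freelist_strategy == 1 selects FreeListFastAlloc, == 2 selects FreeListMany, and any other value falls back to FreeListLegacy. Paged spaces receive the result through their constructors (ReadOnlySpace below passes FreeList::CreateFreeList()), while spaces that never allocate from a free list get a NoFreeList. A standalone sketch of the same flag-driven factory; the int parameter and the empty stand-in classes are illustrative, not V8's flag machinery:

#include <memory>
#include <string>

struct FreeListSketch { virtual ~FreeListSketch() = default; };
struct LegacySketch : FreeListSketch {};     // stand-in for FreeListLegacy
struct FastAllocSketch : FreeListSketch {};  // stand-in for FreeListFastAlloc
struct ManySketch : FreeListSketch {};       // stand-in for FreeListMany

// Mirrors FreeList::CreateFreeList(): the flag value picks the strategy and
// the legacy list is the default for anything else (including 0).
std::unique_ptr<FreeListSketch> CreateFreeList(int gc_freelist_strategy) {
  if (gc_freelist_strategy == 1) return std::make_unique<FastAllocSketch>();
  if (gc_freelist_strategy == 2) return std::make_unique<ManySketch>();
  return std::make_unique<LegacySketch>();
}

int main(int argc, char** argv) {
  int strategy = argc > 1 ? std::stoi(argv[1]) : 0;
  auto free_list = CreateFreeList(strategy);  // owned here, as in Space
  return free_list == nullptr;                // always 0: a list was created
}
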
@@ -3026,7 +3033,7 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
page->DecreaseAllocatedBytes(size_in_bytes);
// Blocks have to be a minimum size to hold free list items.
- if (size_in_bytes < kMinBlockSize) {
+ if (size_in_bytes < min_block_size_) {
page->add_wasted_memory(size_in_bytes);
wasted_bytes_ += size_in_bytes;
return size_in_bytes;
@@ -3041,52 +3048,22 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
return 0;
}
-FreeSpace FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size) {
- FreeListCategoryIterator it(this, type);
- FreeSpace node;
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- node = current->PickNodeFromList(minimum_size, node_size);
- if (!node.is_null()) {
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
- }
- RemoveCategory(current);
- }
- return node;
-}
+// ------------------------------------------------
+// FreeListLegacy implementation
-FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
- size_t minimum_size, size_t* node_size) {
- if (categories_[type] == nullptr) return FreeSpace();
- FreeSpace node = categories_[type]->PickNodeFromList(minimum_size, node_size);
- if (!node.is_null()) {
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- }
- return node;
-}
+FreeListLegacy::FreeListLegacy() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kHuge + 1;
+ last_category_ = kHuge;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
-FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
- size_t* node_size,
- size_t minimum_size) {
- FreeListCategoryIterator it(this, type);
- FreeSpace node;
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- node = current->SearchForNodeInList(minimum_size, node_size);
- if (!node.is_null()) {
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
- }
- if (current->is_empty()) {
- RemoveCategory(current);
- }
- }
- return node;
+ Reset();
}
-FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
+FreeListLegacy::~FreeListLegacy() { delete[] categories_; }
+
+FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size) {
DCHECK_GE(kMaxBlockSize, size_in_bytes);
FreeSpace node;
// First try the allocation fast path: try to allocate the minimum element
@@ -3094,21 +3071,31 @@ FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
FreeListCategoryType type =
SelectFastAllocationFreeListCategoryType(size_in_bytes);
for (int i = type; i < kHuge && node.is_null(); i++) {
- node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
}
if (node.is_null()) {
// Next search the huge list for free list nodes. This takes linear time in
// the number of huge elements.
- node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
+ node = SearchForNodeInList(kHuge, size_in_bytes, node_size);
}
if (node.is_null() && type != kHuge) {
- // We didn't find anything in the huge list. Now search the best fitting
- // free list for a node that has at least the requested size.
+ // We didn't find anything in the huge list.
type = SelectFreeListCategoryType(size_in_bytes);
- node = TryFindNodeIn(type, size_in_bytes, node_size);
+
+ if (type == kTiniest) {
+ // For this tiniest object, the tiny list hasn't been searched yet.
+ // Search the tiny list now.
+ node = TryFindNodeIn(kTiny, size_in_bytes, node_size);
+ }
+
+ if (node.is_null()) {
+ // Now search the best fitting free list for a node that has at least the
+ // requested size.
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ }
}
if (!node.is_null()) {
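
FreeListLegacy::Allocate now makes three passes: (1) a fast path that takes the top block of the first non-empty category whose minimum block size already fits the request, (2) a linear search of the huge list, and (3) a final attempt on the exact category for the request, trying kTiny before kTiniest for the smallest requests since the fast path started above those categories. A condensed standalone sketch of that control flow; categories are plain vectors of block sizes and the index parameters are illustrative:

#include <cstddef>
#include <vector>

using Category = std::vector<size_t>;  // block sizes, front == top of list

// Take the top block of a category if it is big enough; no searching inside
// the category (TryFindNodeIn analogue).
size_t TryFirst(Category& category, size_t minimum) {
  if (category.empty() || category.front() < minimum) return 0;
  size_t node = category.front();
  category.erase(category.begin());
  return node;
}

// Walk the whole category for a fitting block (SearchForNodeInList analogue).
size_t SearchAll(Category& category, size_t minimum) {
  for (size_t i = 0; i < category.size(); i++) {
    if (category[i] >= minimum) {
      size_t node = category[i];
      category.erase(category.begin() + i);
      return node;
    }
  }
  return 0;
}

// Mirrors the pass order of FreeListLegacy::Allocate. |type| is the exact
// category for the request, |fast_type| the first category whose minimum
// block always fits it, and kHuge the last index.
size_t Allocate(std::vector<Category>& cats, int type, int fast_type,
                int kHuge, size_t size) {
  size_t node = 0;
  for (int i = fast_type; i < kHuge && node == 0; i++) {  // pass 1: fast path
    node = TryFirst(cats[i], size);
  }
  if (node == 0) node = SearchAll(cats[kHuge], size);  // pass 2: huge list
  if (node == 0 && type != kHuge) {                    // pass 3: exact category
    if (type == 0) node = TryFirst(cats[1], size);     // kTiniest: try kTiny
    if (node == 0) node = TryFirst(cats[type], size);
  }
  return node;
}

int main() {
  // Six categories, kTiniest .. kHuge; request 40 bytes.
  std::vector<Category> cats = {{}, {}, {48}, {}, {}, {4096}};
  size_t got = Allocate(cats, /*type=*/1, /*fast_type=*/2, /*kHuge=*/5, 40);
  return got == 48 ? 0 : 1;  // the 48-byte block satisfies the fast path
}
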
@@ -3119,6 +3106,122 @@ FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
return node;
}
+// ------------------------------------------------
+// FreeListFastAlloc implementation
+
+FreeListFastAlloc::FreeListFastAlloc() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kHuge + 1;
+ last_category_ = kHuge;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+FreeListFastAlloc::~FreeListFastAlloc() { delete[] categories_; }
+
+FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+ // Try to allocate the biggest element possible (to make the most of later
+ // bump-pointer allocations).
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ for (int i = kHuge; i >= type && node.is_null(); i--) {
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
+ }
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListMany implementation
+
+// Cf. the declaration of |categories_max| in |spaces.h| to see how this is
+// computed.
+const size_t FreeListMany::categories_max[kNumberOfCategories] = {
+ 24, 32, 40, 48, 56, 64, 72,
+ 80, 88, 96, 104, 112, 120, 128,
+ 136, 144, 152, 160, 168, 176, 184,
+ 192, 200, 208, 216, 224, 232, 240,
+ 248, 256, 384, 512, 768, 1024, 1536,
+ 2048, 3072, 4080, 4088, 4096, 6144, 8192,
+ 12288, 16384, 24576, 32768, 49152, 65536, Page::kPageSize};
+
+FreeListMany::FreeListMany() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kNumberOfCategories;
+ last_category_ = number_of_categories_ - 1;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+size_t FreeListMany::GuaranteedAllocatable(size_t maximum_freed) {
+ if (maximum_freed < categories_max[0]) {
+ return 0;
+ }
+ for (int cat = kFirstCategory + 1; cat < last_category_; cat++) {
+ if (maximum_freed <= categories_max[cat]) {
+ return categories_max[cat - 1];
+ }
+ }
+ return maximum_freed;
+}
+
+Page* FreeListMany::GetPageForSize(size_t size_in_bytes) {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(last_category_);
+ for (int cat = last_category_ - 1; !page && cat >= minimum_category; cat--) {
+ page = GetPageForCategoryType(cat);
+ }
+ return page;
+}
+
+FreeListMany::~FreeListMany() { delete[] categories_; }
+
+FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ for (int i = type; i < last_category_ && node.is_null(); i++) {
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
+ }
+
+ if (node.is_null()) {
+ // Search each element of the last category.
+ node = SearchForNodeInList(last_category_, size_in_bytes, node_size);
+ }
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// Generic FreeList methods (non alloc/free related)
+
+void FreeList::Reset() {
+ ForAllFreeListCategories(
+ [](FreeListCategory* category) { category->Reset(); });
+ for (int i = kFirstCategory; i < number_of_categories_; i++) {
+ categories_[i] = nullptr;
+ }
+ wasted_bytes_ = 0;
+}
+
size_t FreeList::EvictFreeListItems(Page* page) {
size_t sum = 0;
page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
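
GuaranteedAllocatable in FreeListMany returns the largest block size that is certain to be allocatable after freeing maximum_freed bytes: values below the first bound in categories_max yield 0, and otherwise the result is the bound of the category just below the one maximum_freed falls into. With maximum_freed = 100, for example, the loop stops at the 104-byte category and returns 96. A small standalone check of that logic over a truncated copy of the table:

#include <cassert>
#include <cstddef>

// First entries of FreeListMany::categories_max, truncated for brevity.
const size_t kMax[] = {24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112};
const int kNum = sizeof(kMax) / sizeof(kMax[0]);

// Same shape as FreeListMany::GuaranteedAllocatable, over the short table.
size_t GuaranteedAllocatable(size_t maximum_freed) {
  if (maximum_freed < kMax[0]) return 0;
  for (int cat = 1; cat < kNum; cat++) {
    if (maximum_freed <= kMax[cat]) return kMax[cat - 1];
  }
  return maximum_freed;  // beyond the truncated table
}

int main() {
  assert(GuaranteedAllocatable(20) == 0);    // smaller than the first bound
  assert(GuaranteedAllocatable(24) == 24);   // exactly the first bound
  assert(GuaranteedAllocatable(100) == 96);  // 100 falls into the 104 bucket
}
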
@@ -3148,7 +3251,7 @@ void FreeList::RepairLists(Heap* heap) {
bool FreeList::AddCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
- DCHECK_LT(type, kNumberOfCategories);
+ DCHECK_LT(type, number_of_categories_);
FreeListCategory* top = categories_[type];
if (category->is_empty()) return false;
@@ -3165,7 +3268,7 @@ bool FreeList::AddCategory(FreeListCategory* category) {
void FreeList::RemoveCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
- DCHECK_LT(type, kNumberOfCategories);
+ DCHECK_LT(type, number_of_categories_);
FreeListCategory* top = categories_[type];
// Common double-linked list removal.
@@ -3193,8 +3296,16 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
PrintF("null\n");
}
+int MemoryChunk::FreeListsLength() {
+ int length = 0;
+ for (int cat = kFirstCategory; cat <= free_list()->last_category(); cat++) {
+ if (categories_[cat] != nullptr) {
+ length += categories_[cat]->FreeListLength();
+ }
+ }
+ return length;
+}
-#ifdef DEBUG
size_t FreeListCategory::SumFreeList() {
size_t sum = 0;
FreeSpace cur = top();
@@ -3209,20 +3320,10 @@ size_t FreeListCategory::SumFreeList() {
return sum;
}
-int FreeListCategory::FreeListLength() {
- int length = 0;
- FreeSpace cur = top();
- while (!cur.is_null()) {
- length++;
- cur = cur.next();
- if (length == kVeryLongFreeList) return length;
- }
- return length;
-}
-
+#ifdef DEBUG
bool FreeList::IsVeryLong() {
int len = 0;
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ for (int i = kFirstCategory; i < number_of_categories_; i++) {
FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
while (it.HasNext()) {
len += it.Next()->FreeListLength();
@@ -3254,7 +3355,7 @@ void PagedSpace::PrepareForMarkCompact() {
FreeLinearAllocationArea();
// Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
+ free_list_->Reset();
}
size_t PagedSpace::SizeOfObjects() {
@@ -3347,7 +3448,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
+ (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
return RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes));
}
@@ -3366,18 +3467,21 @@ void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
#endif
ReadOnlySpace::ReadOnlySpace(Heap* heap)
- : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
+ : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList()),
is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
}
void ReadOnlyPage::MakeHeaderRelocatable() {
- if (mutex_ != nullptr) {
- delete mutex_;
- heap_ = nullptr;
- mutex_ = nullptr;
- local_tracker_ = nullptr;
- reservation_.Reset();
+ ReleaseAllocatedMemoryNeededForWritableChunk();
+ // Detached read-only space needs to have a valid marking bitmap and free list
+ // categories. Instruct LSan to ignore them if required.
+ LSAN_IGNORE_OBJECT(categories_);
+ for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
+ LSAN_IGNORE_OBJECT(categories_[i]);
}
+ LSAN_IGNORE_OBJECT(marking_bitmap_);
+ heap_ = nullptr;
+ owner_ = nullptr;
}
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
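
MakeHeaderRelocatable now keeps the categories_ array and the marking bitmap alive for detached read-only pages and annotates them for LeakSanitizer instead of freeing them. The LSAN_IGNORE_OBJECT annotation presumably comes from a V8 base header for LeakSanitizer support; its definition is not shown in this hunk, but a plausible shape, assuming it wraps LSan's public __lsan_ignore_object() and compiles away when leak checking is disabled, would be:

// Hypothetical sketch only; the real macro is defined elsewhere in V8's base
// library.
#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#define LSAN_IGNORE_OBJECT(ptr) __lsan_ignore_object(ptr)
#else
// Without LSan the annotation must still consume ptr but do nothing.
#define LSAN_IGNORE_OBJECT(ptr) static_cast<void>(ptr)
#endif

// Usage sketch: mark an allocation that intentionally stays reachable only
// from a detached chunk, so LSan does not report it.
//   LSAN_IGNORE_OBJECT(categories_);
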
@@ -3396,7 +3500,7 @@ void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
- free_list_.RepairLists(heap());
+ free_list_->RepairLists(heap());
// Each page may have a small free space that is not tracked by a free list.
// Those free spaces still contain null as their map pointer.
// Overwrite them with new fillers.
@@ -3422,7 +3526,7 @@ void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
void ReadOnlySpace::ClearStringPaddingIfNeeded() {
if (is_string_padding_cleared_) return;
- ReadOnlyHeapIterator iterator(this);
+ ReadOnlyHeapObjectIterator iterator(this);
for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
if (o.IsSeqOneByteString()) {
SeqOneByteString::cast(o).clear_padding();
@@ -3480,13 +3584,14 @@ void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
}
// -----------------------------------------------------------------------------
-// LargeObjectIterator
+// LargeObjectSpaceObjectIterator
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
+LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
+ LargeObjectSpace* space) {
current_ = space->first_page();
}
-HeapObject LargeObjectIterator::Next() {
+HeapObject LargeObjectSpaceObjectIterator::Next() {
if (current_ == nullptr) return HeapObject();
HeapObject object = current_->GetObject();
@@ -3501,7 +3606,10 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, LO_SPACE) {}
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
- : Space(heap, id), size_(0), page_count_(0), objects_size_(0) {}
+ : Space(heap, id, new NoFreeList()),
+ size_(0),
+ page_count_(0),
+ objects_size_(0) {}
void LargeObjectSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
@@ -3584,7 +3692,7 @@ LargePage* CodeLargeObjectSpace::FindPage(Address a) {
void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
- LargeObjectIterator it(this);
+ LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (marking_state->IsBlackOrGrey(obj)) {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
@@ -3614,7 +3722,7 @@ void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
}
void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
- DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
+ DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
@@ -3697,7 +3805,8 @@ bool LargeObjectSpace::ContainsSlow(Address addr) {
}
std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
- return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
+ return std::unique_ptr<ObjectIterator>(
+ new LargeObjectSpaceObjectIterator(this));
}
#ifdef VERIFY_HEAP
@@ -3722,8 +3831,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
- CHECK(heap()->map_space()->Contains(map) ||
- heap()->read_only_space()->Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// We have only the following types in the large object space:
if (!(object.IsAbstractCode() || object.IsSeqString() ||
@@ -3787,7 +3895,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
#ifdef DEBUG
void LargeObjectSpace::Print() {
StdoutStream os;
- LargeObjectIterator it(this);
+ LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
obj.Print(os);
}
@@ -3796,9 +3904,9 @@ void LargeObjectSpace::Print() {
void Page::Print() {
// Make a best-effort to print the objects in the page.
PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
- this->owner()->name());
- printf(" --------------------------------------\n");
- HeapObjectIterator objects(this);
+ Heap::GetSpaceName(this->owner_identity()));
+ PrintF(" --------------------------------------\n");
+ PagedSpaceObjectIterator objects(this);
unsigned mark_size = 0;
for (HeapObject object = objects.Next(); !object.is_null();
object = objects.Next()) {
@@ -3811,8 +3919,8 @@ void Page::Print() {
object.ShortPrint();
PrintF("\n");
}
- printf(" --------------------------------------\n");
- printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
+ PrintF(" --------------------------------------\n");
+ PrintF(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
heap()->incremental_marking()->marking_state()->live_bytes(this));
}
@@ -3856,7 +3964,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
- DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
+ DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
AllocationStep(object_size, result.address(), object_size);
return result;
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 7522cac9cb..384c731f37 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -20,6 +20,7 @@
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
@@ -119,19 +120,10 @@ class Space;
#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
DCHECK((0 < size) && (size <= code_space->AreaSize()))
-enum FreeListCategoryType {
- kTiniest,
- kTiny,
- kSmall,
- kMedium,
- kLarge,
- kHuge,
-
- kFirstCategory = kTiniest,
- kLastCategory = kHuge,
- kNumberOfCategories = kLastCategory + 1,
- kInvalidCategory
-};
+using FreeListCategoryType = int;
+
+static const FreeListCategoryType kFirstCategory = 0;
+static const FreeListCategoryType kInvalidCategory = -1;
enum FreeMode { kLinkCategory, kDoNotLinkCategory };
@@ -151,12 +143,14 @@ class FreeListCategory {
page_(page),
type_(kInvalidCategory),
available_(0),
+ length_(0),
prev_(nullptr),
next_(nullptr) {}
void Initialize(FreeListCategoryType type) {
type_ = type;
available_ = 0;
+ length_ = 0;
prev_ = nullptr;
next_ = nullptr;
}
@@ -188,10 +182,8 @@ class FreeListCategory {
void set_free_list(FreeList* free_list) { free_list_ = free_list; }
-#ifdef DEBUG
size_t SumFreeList();
- int FreeListLength();
-#endif
+ int FreeListLength() { return length_; }
private:
// For debug builds we accurately compute free lists lengths up until
@@ -218,6 +210,9 @@ class FreeListCategory {
// category.
size_t available_;
+ // |length_|: Total blocks in this free list category.
+ int length_;
+
// |top_|: Points to the top FreeSpace in the free list category.
FreeSpace top_;
@@ -230,6 +225,327 @@ class FreeListCategory {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
+// A free list maintains free blocks of memory. The free list is organized in
+// a way to encourage objects allocated around the same time to be near each
+// other. The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer. When the limit is hit we need to
+// find a new space to allocate from. This is done with the free list, which is
+// divided up into rough categories to cut down on waste. Having finer
+// categories would scatter allocation more.
+class FreeList {
+ public:
+ // Creates a FreeList of the default class (FreeListLegacy for now).
+ V8_EXPORT_PRIVATE static FreeList* CreateFreeList();
+
+ virtual ~FreeList() = default;
+
+ // Returns how much memory can be allocated after freeing maximum_freed
+ // memory.
+ virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;
+
+ // Adds a node on the free list. The block of size {size_in_bytes} starting
+ // at {start} is placed on the free list. The return value is the number of
+ // bytes that were not added to the free list, because the freed memory block
+ // was too small. Bookkeeping information will be written to the block, i.e.,
+ // its contents will be destroyed. The start address should be word aligned,
+ // and the size should be a non-zero multiple of the word size.
+ virtual size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
+
+ // Allocates a free space node from the free list of at least size_in_bytes
+ // bytes. Returns the actual node size in node_size which can be bigger than
+ // size_in_bytes. This method returns null if the allocation request cannot be
+ // handled by the free list.
+ virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) = 0;
+
+ // Returns a page containing an entry of at least size_in_bytes, or nullptr otherwise.
+ V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;
+
+ void Reset();
+
+ // Return the number of bytes available on the free list.
+ size_t Available() {
+ size_t available = 0;
+ ForAllFreeListCategories([&available](FreeListCategory* category) {
+ available += category->available();
+ });
+ return available;
+ }
+
+ bool IsEmpty() {
+ bool empty = true;
+ ForAllFreeListCategories([&empty](FreeListCategory* category) {
+ if (!category->is_empty()) empty = false;
+ });
+ return empty;
+ }
+
+ // Used after booting the VM.
+ void RepairLists(Heap* heap);
+
+ V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
+ bool ContainsPageFreeListItems(Page* page);
+
+ int number_of_categories() { return number_of_categories_; }
+ FreeListCategoryType last_category() { return last_category_; }
+
+ size_t wasted_bytes() { return wasted_bytes_; }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
+ FreeListCategory* current = categories_[type];
+ while (current != nullptr) {
+ FreeListCategory* next = current->next();
+ callback(current);
+ current = next;
+ }
+ }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(Callback callback) {
+ for (int i = kFirstCategory; i < number_of_categories(); i++) {
+ ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
+ }
+ }
+
+ bool AddCategory(FreeListCategory* category);
+ V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
+ void PrintCategories(FreeListCategoryType type);
+
+#ifdef DEBUG
+ size_t SumFreeLists();
+ bool IsVeryLong();
+#endif
+
+ protected:
+ class FreeListCategoryIterator final {
+ public:
+ FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
+ : current_(free_list->categories_[type]) {}
+
+ bool HasNext() const { return current_ != nullptr; }
+
+ FreeListCategory* Next() {
+ DCHECK(HasNext());
+ FreeListCategory* tmp = current_;
+ current_ = current_->next();
+ return tmp;
+ }
+
+ private:
+ FreeListCategory* current_;
+ };
+
+ // Tries to retrieve a node from the first category in a given |type|.
+ // Returns nullptr if the category is empty or the top entry is smaller
+ // than minimum_size.
+ FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
+
+ // Searches a given |type| for a node of at least |minimum_size|.
+ FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
+
+ // Returns the smallest category in which an object of |size_in_bytes| could
+ // fit.
+ virtual FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) = 0;
+
+ FreeListCategory* top(FreeListCategoryType type) const {
+ return categories_[type];
+ }
+
+ Page* GetPageForCategoryType(FreeListCategoryType type) {
+ return top(type) ? top(type)->page() : nullptr;
+ }
+
+ int number_of_categories_ = 0;
+ FreeListCategoryType last_category_ = 0;
+ size_t min_block_size_ = 0;
+
+ std::atomic<size_t> wasted_bytes_{0};
+ FreeListCategory** categories_ = nullptr;
+
+ friend class FreeListCategory;
+ friend class Page;
+ friend class MemoryChunk;
+ friend class ReadOnlyPage;
+};
+
+// FreeList used for spaces that don't have freelists
+// (only the LargeObject space for now).
+class NoFreeList final : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) final {
+ FATAL("NoFreeList can't be used as a standard FreeList. ");
+ }
+ size_t Free(Address start, size_t size_in_bytes, FreeMode mode) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+ Page* GetPageForSize(size_t size_in_bytes) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+
+ private:
+ FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+};
+
+// ----------------------------------------------------------------------------
+// Space is the abstract superclass for all allocation spaces.
+class V8_EXPORT_PRIVATE Space : public Malloced {
+ public:
+ Space(Heap* heap, AllocationSpace id, FreeList* free_list)
+ : allocation_observers_paused_(false),
+ heap_(heap),
+ id_(id),
+ committed_(0),
+ max_committed_(0),
+ free_list_(std::unique_ptr<FreeList>(free_list)) {
+ external_backing_store_bytes_ =
+ new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
+ external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
+ external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
+ 0;
+ CheckOffsetsAreConsistent();
+ }
+
+ void CheckOffsetsAreConsistent() const;
+
+ static inline void MoveExternalBackingStoreBytes(
+ ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
+
+ virtual ~Space() {
+ delete[] external_backing_store_bytes_;
+ external_backing_store_bytes_ = nullptr;
+ }
+
+ Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
+
+ bool IsDetached() const { return heap_ == nullptr; }
+
+ AllocationSpace identity() { return id_; }
+
+ const char* name() { return Heap::GetSpaceName(id_); }
+
+ virtual void AddAllocationObserver(AllocationObserver* observer);
+
+ virtual void RemoveAllocationObserver(AllocationObserver* observer);
+
+ virtual void PauseAllocationObservers();
+
+ virtual void ResumeAllocationObservers();
+
+ virtual void StartNextInlineAllocationStep() {}
+
+ void AllocationStep(int bytes_since_last, Address soon_object, int size);
+
+ // Return the total amount committed memory for this space, i.e., allocatable
+ // memory and page headers.
+ virtual size_t CommittedMemory() { return committed_; }
+
+ virtual size_t MaximumCommittedMemory() { return max_committed_; }
+
+ // Returns allocated size.
+ virtual size_t Size() = 0;
+
+ // Returns size of objects. Can differ from the allocated size
+ // (e.g. see LargeObjectSpace).
+ virtual size_t SizeOfObjects() { return Size(); }
+
+ // Approximate amount of physical memory committed for this space.
+ virtual size_t CommittedPhysicalMemory() = 0;
+
+ // Return the available bytes without growing.
+ virtual size_t Available() = 0;
+
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (id_ == CODE_SPACE) {
+ return RoundDown(size, kCodeAlignment);
+ } else {
+ return RoundDown(size, kTaggedSize);
+ }
+ }
+
+ virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
+
+ void AccountCommitted(size_t bytes) {
+ DCHECK_GE(committed_ + bytes, committed_);
+ committed_ += bytes;
+ if (committed_ > max_committed_) {
+ max_committed_ = committed_;
+ }
+ }
+
+ void AccountUncommitted(size_t bytes) {
+ DCHECK_GE(committed_, committed_ - bytes);
+ committed_ -= bytes;
+ }
+
+ inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ // Returns amount of off-heap memory in-use by objects in this Space.
+ virtual size_t ExternalBackingStoreBytes(
+ ExternalBackingStoreType type) const {
+ return external_backing_store_bytes_[type];
+ }
+
+ void* GetRandomMmapAddr();
+
+ MemoryChunk* first_page() { return memory_chunk_list_.front(); }
+ MemoryChunk* last_page() { return memory_chunk_list_.back(); }
+
+ base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
+
+ FreeList* free_list() { return free_list_.get(); }
+
+#ifdef DEBUG
+ virtual void Print() = 0;
+#endif
+
+ protected:
+ intptr_t GetNextInlineAllocationStepSize();
+ bool AllocationObserversActive() {
+ return !allocation_observers_paused_ && !allocation_observers_.empty();
+ }
+
+ void DetachFromHeap() { heap_ = nullptr; }
+
+ std::vector<AllocationObserver*> allocation_observers_;
+
+ // The List manages the pages that belong to the given space.
+ base::List<MemoryChunk> memory_chunk_list_;
+
+ // Tracks off-heap memory used by this space.
+ std::atomic<size_t>* external_backing_store_bytes_;
+
+ static const intptr_t kIdOffset = 9 * kSystemPointerSize;
+
+ bool allocation_observers_paused_;
+ Heap* heap_;
+ AllocationSpace id_;
+
+ // Keeps track of committed memory in a space.
+ size_t committed_;
+ size_t max_committed_;
+
+ std::unique_ptr<FreeList> free_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(Space);
+};
+
// The CodeObjectRegistry holds all start addresses of code objects of a given
// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
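
The hunk above turns FreeList into an abstract base class: concrete strategies override GuaranteedAllocatable, Allocate, GetPageForSize and SelectFreeListCategoryType, the shared category bookkeeping stays in the base, every Space now owns its FreeList through a unique_ptr, and NoFreeList fails hard if a space that never allocates from a free list ever tries to use it. A compressed standalone model of that shape; all types here are simplified stand-ins rather than V8 classes:

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <memory>

// FreeList analogue: an abstract allocation strategy.
class FreeListSketch {
 public:
  virtual ~FreeListSketch() = default;
  virtual size_t Allocate(size_t size_in_bytes) = 0;  // 0 == nothing found
};

// Stand-in for a concrete strategy such as FreeListLegacy or FreeListMany.
class LegacySketch final : public FreeListSketch {
 public:
  size_t Allocate(size_t size_in_bytes) override { return size_in_bytes; }
};

// NoFreeList analogue: any use is a programming error, but it still lets
// spaces that never allocate from a free list satisfy the Space constructor.
class NoFreeListSketch final : public FreeListSketch {
 public:
  size_t Allocate(size_t) override { std::abort(); }
};

// Space analogue: the raw pointer passed to the constructor is owned via a
// unique_ptr member, mirroring Space(heap, id, free_list) and free_list_.
class SpaceSketch {
 public:
  explicit SpaceSketch(FreeListSketch* free_list) : free_list_(free_list) {}
  FreeListSketch* free_list() { return free_list_.get(); }

 private:
  std::unique_ptr<FreeListSketch> free_list_;
};

int main() {
  SpaceSketch old_space(new LegacySketch());        // paged space
  SpaceSketch large_space(new NoFreeListSketch());  // never allocates from it
  std::printf("%zu\n", old_space.free_list()->Allocate(32));  // prints 32
}
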
@@ -265,7 +581,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
-class MemoryChunk {
+class MemoryChunk : public BasicMemoryChunk {
public:
// Use with std data structures.
struct Hasher {
@@ -274,74 +590,6 @@ class MemoryChunk {
}
};
- enum Flag {
- NO_FLAGS = 0u,
- IS_EXECUTABLE = 1u << 0,
- POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
- POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
- // A page in the from-space or a young large page that was not scavenged
- // yet.
- FROM_PAGE = 1u << 3,
- // A page in the to-space or a young large page that was scavenged.
- TO_PAGE = 1u << 4,
- LARGE_PAGE = 1u << 5,
- EVACUATION_CANDIDATE = 1u << 6,
- NEVER_EVACUATE = 1u << 7,
-
- // Large objects can have a progress bar in their page header. These object
- // are scanned in increments and will be kept black while being scanned.
- // Even if the mutator writes to them they will be kept black and a white
- // to grey transition is performed in the value.
- HAS_PROGRESS_BAR = 1u << 8,
-
- // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
- // from new to old space during evacuation.
- PAGE_NEW_OLD_PROMOTION = 1u << 9,
-
- // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
- // within the new space during evacuation.
- PAGE_NEW_NEW_PROMOTION = 1u << 10,
-
- // This flag is intended to be used for testing. Works only when both
- // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
- // are set. It forces the page to become an evacuation candidate at next
- // candidates selection cycle.
- FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
-
- // This flag is intended to be used for testing.
- NEVER_ALLOCATE_ON_PAGE = 1u << 12,
-
- // The memory chunk is already logically freed, however the actual freeing
- // still has to be performed.
- PRE_FREED = 1u << 13,
-
- // |POOLED|: When actually freeing this chunk, only uncommit and do not
- // give up the reservation as we still reuse the chunk at some point.
- POOLED = 1u << 14,
-
- // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
- // has been aborted and needs special handling by the sweeper.
- COMPACTION_WAS_ABORTED = 1u << 15,
-
- // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
- // on pages is sometimes aborted. The flag is used to avoid repeatedly
- // triggering on the same page.
- COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
-
- // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
- // to iterate the page.
- SWEEP_TO_ITERATE = 1u << 17,
-
- // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
- // enabled.
- INCREMENTAL_MARKING = 1u << 18,
- NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
-
- // The memory chunk freeing bookkeeping has been performed but the chunk has
- // not yet been freed.
- UNREGISTERED = 1u << 20
- };
-
using Flags = uintptr_t;
static const Flags kPointersToHereAreInterestingMask =
@@ -370,36 +618,12 @@ class MemoryChunk {
kSweepingInProgress,
};
- static const intptr_t kAlignment =
- (static_cast<uintptr_t>(1) << kPageSizeBits);
-
- static const intptr_t kAlignmentMask = kAlignment - 1;
-
- static const intptr_t kSizeOffset = 0;
- static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
- static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
- static const intptr_t kReservationOffset =
- kMarkBitmapOffset + kSystemPointerSize;
- static const intptr_t kHeapOffset =
- kReservationOffset + 3 * kSystemPointerSize;
- static const intptr_t kHeaderSentinelOffset =
- kHeapOffset + kSystemPointerSize;
- static const intptr_t kOwnerOffset =
- kHeaderSentinelOffset + kSystemPointerSize;
-
static const size_t kHeaderSize =
- kSizeOffset // NOLINT
- + kSizetSize // size_t size
- + kUIntptrSize // uintptr_t flags_
- + kSystemPointerSize // Bitmap* marking_bitmap_
- + 3 * kSystemPointerSize // VirtualMemory reservation_
- + kSystemPointerSize // Heap* heap_
- + kSystemPointerSize // Address header_sentinel_
- + kSystemPointerSize // Address area_start_
- + kSystemPointerSize // Address area_end_
- + kSystemPointerSize // Address owner_
- + kSizetSize // size_t progress_bar_
- + kIntptrSize // intptr_t live_byte_count_
+ BasicMemoryChunk::kHeaderSize // Parent size.
+ + 3 * kSystemPointerSize // VirtualMemory reservation_
+ + kSystemPointerSize // Address owner_
+ + kSizetSize // size_t progress_bar_
+ + kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
@@ -415,9 +639,8 @@ class MemoryChunk {
+ kSizetSize // size_t allocated_bytes_
+ kSizetSize // size_t wasted_memory_
+ kSystemPointerSize * 2 // base::ListNode
- + kSystemPointerSize * kNumberOfCategories
- // FreeListCategory categories_[kNumberOfCategories]
- + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ + kSystemPointerSize // FreeListCategory** categories_
+ + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ kSystemPointerSize // Bitmap* young_generation_bitmap_
+ kSystemPointerSize; // CodeObjectRegistry* code_object_registry_
@@ -428,14 +651,12 @@ class MemoryChunk {
// Maximum number of nested code memory modification scopes.
static const int kMaxWriteUnprotectCounter = 3;
- static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
-
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
- static MemoryChunk* FromHeapObject(const HeapObject o) {
+ static MemoryChunk* FromHeapObject(HeapObject o) {
return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
}
@@ -465,22 +686,8 @@ class MemoryChunk {
void DiscardUnusedMemory(Address addr, size_t size);
- Address address() const {
- return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
- }
-
base::Mutex* mutex() { return mutex_; }
- bool Contains(Address addr) {
- return addr >= area_start() && addr < area_end();
- }
-
- // Checks whether |addr| can be a limit of addresses in this page. It's a
- // limit if it's in the page, or if it's just after the last byte of the page.
- bool ContainsLimit(Address addr) {
- return addr >= area_start() && addr <= area_end();
- }
-
void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
concurrent_sweeping_ = state;
}
@@ -491,15 +698,17 @@ class MemoryChunk {
bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
- size_t size() const { return size_; }
- void set_size(size_t size) { size_ = size; }
-
inline Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
- Heap* synchronized_heap();
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race in
+ // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
+ // release store.
+ void SynchronizedHeapLoad();
+#endif
template <RememberedSetType type>
bool ContainsSlots() {
@@ -547,12 +756,7 @@ class MemoryChunk {
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
- void AllocateMarkingBitmap();
- void ReleaseMarkingBitmap();
-
- Address area_start() { return area_start_; }
- Address area_end() { return area_end_; }
- size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
+ int FreeListsLength();
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
@@ -596,36 +800,6 @@ class MemoryChunk {
return this->address() + (index << kTaggedSizeLog2);
}
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- void SetFlag(Flag flag) {
- if (access_mode == AccessMode::NON_ATOMIC) {
- flags_ |= flag;
- } else {
- base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
- }
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsFlagSet(Flag flag) {
- return (GetFlags<access_mode>() & flag) != 0;
- }
-
- void ClearFlag(Flag flag) { flags_ &= ~flag; }
- // Set or clear multiple flags at a time. The flags in the mask are set to
- // the value in "flags", the rest retain the current value in |flags_|.
- void SetFlags(uintptr_t flags, uintptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
- }
-
- // Return all current flags.
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- uintptr_t GetFlags() {
- if (access_mode == AccessMode::NON_ATOMIC) {
- return flags_;
- } else {
- return base::AsAtomicWord::Relaxed_Load(&flags_);
- }
- }
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
@@ -653,12 +827,11 @@ class MemoryChunk {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
- bool IsFromPage() const { return (flags_ & FROM_PAGE) != 0; }
- bool IsToPage() const { return (flags_ & TO_PAGE) != 0; }
- bool IsLargePage() const { return (flags_ & LARGE_PAGE) != 0; }
-
+ bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
+ bool IsToPage() const { return IsFlagSet(TO_PAGE); }
+ bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
bool InYoungGeneration() const {
- return (flags_ & kIsInYoungGenerationMask) != 0;
+ return (GetFlags() & kIsInYoungGenerationMask) != 0;
}
bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
bool InNewLargeObjectSpace() const {
@@ -667,11 +840,20 @@ class MemoryChunk {
bool InOldSpace() const;
V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
+ // Gets the chunk's owner or null if the space has been detached.
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
- static inline bool HasHeaderSentinel(Address slot_addr);
+ bool IsWritable() const {
+ // If this is a read-only space chunk but heap_ is non-null, it has not yet
+ // been sealed and can be written to.
+ return !InReadOnlySpace() || heap_ != nullptr;
+ }
+
+ // Gets the chunk's allocation space, potentially dealing with a null owner_
+ // (like read-only chunks have).
+ inline AllocationSpace owner_identity() const;
// Emits a memory barrier. For TSAN builds the other thread needs to perform
// MemoryChunk::synchronized_heap() to simulate the barrier.
@@ -693,14 +875,20 @@ class MemoryChunk {
CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
+ FreeList* free_list() { return owner()->free_list(); }
+
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
VirtualMemory reservation);
- // Should be called when memory chunk is about to be freed.
- void ReleaseAllocatedMemory();
+ // Release all memory allocated by the chunk. Should be called when memory
+ // chunk is about to be freed.
+ void ReleaseAllAllocatedMemory();
+ // Release memory allocated by the chunk, except that which is needed by
+ // read-only space chunks.
+ void ReleaseAllocatedMemoryNeededForWritableChunk();
// Sets the requested page permissions only if the write unprotect counter
// has reached 0.
@@ -719,29 +907,12 @@ class MemoryChunk {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
}
- size_t size_;
- uintptr_t flags_;
-
- Bitmap* marking_bitmap_;
-
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
- Heap* heap_;
-
- // This is used to distinguish the memory chunk header from the interior of a
- // large page. The memory chunk header stores here an impossible tagged
- // pointer: the tagger pointer of the page start. A field in a large object is
- // guaranteed to not contain such a pointer.
- Address header_sentinel_;
-
// The space owning this memory chunk.
std::atomic<Space*> owner_;
- // Start and end of allocatable memory on this chunk.
- Address area_start_;
- Address area_end_;
-
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
std::atomic<size_t> progress_bar_;
@@ -792,7 +963,7 @@ class MemoryChunk {
base::ListNode<MemoryChunk> list_node_;
- FreeListCategory* categories_[kNumberOfCategories];
+ FreeListCategory** categories_;
LocalArrayBufferTracker* local_tracker_;
@@ -807,10 +978,8 @@ class MemoryChunk {
friend class ConcurrentMarkingState;
friend class IncrementalMarkingState;
friend class MajorAtomicMarkingState;
- friend class MajorMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MemoryAllocator;
- friend class MemoryChunkValidator;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class PagedSpace;
@@ -819,7 +988,7 @@ class MemoryChunk {
STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// -----------------------------------------------------------------------------
-// A page is a memory chunk of a size 512K. Large object pages may be larger.
+// A page is a memory chunk of a size 256K. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
@@ -840,7 +1009,7 @@ class Page : public MemoryChunk {
static Page* FromAddress(Address addr) {
return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
}
- static Page* FromHeapObject(const HeapObject o) {
+ static Page* FromHeapObject(HeapObject o) {
return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
}
@@ -873,7 +1042,7 @@ class Page : public MemoryChunk {
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
callback(categories_[i]);
}
}
@@ -884,8 +1053,8 @@ class Page : public MemoryChunk {
// Returns the address for a given offset to the this page.
Address OffsetToAddress(size_t offset) {
Address address_in_page = address() + offset;
- DCHECK_GE(address_in_page, area_start_);
- DCHECK_LT(address_in_page, area_end_);
+ DCHECK_GE(address_in_page, area_start());
+ DCHECK_LT(address_in_page, area_end());
return address_in_page;
}
@@ -963,7 +1132,7 @@ class LargePage : public MemoryChunk {
// x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
- static LargePage* FromHeapObject(const HeapObject o) {
+ static LargePage* FromHeapObject(HeapObject o) {
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
}
@@ -986,162 +1155,11 @@ class LargePage : public MemoryChunk {
friend class MemoryAllocator;
};
-
-// ----------------------------------------------------------------------------
-// Space is the abstract superclass for all allocation spaces.
-class V8_EXPORT_PRIVATE Space : public Malloced {
- public:
- Space(Heap* heap, AllocationSpace id)
- : allocation_observers_paused_(false),
- heap_(heap),
- id_(id),
- committed_(0),
- max_committed_(0) {
- external_backing_store_bytes_ =
- new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
- external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
- external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
- 0;
- CheckOffsetsAreConsistent();
- }
-
- void CheckOffsetsAreConsistent() const;
-
- static inline void MoveExternalBackingStoreBytes(
- ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
-
- virtual ~Space() {
- delete[] external_backing_store_bytes_;
- external_backing_store_bytes_ = nullptr;
- }
-
- Heap* heap() const {
- DCHECK_NOT_NULL(heap_);
- return heap_;
- }
-
- // Identity used in error reporting.
- AllocationSpace identity() { return id_; }
-
- const char* name() { return Heap::GetSpaceName(id_); }
-
- virtual void AddAllocationObserver(AllocationObserver* observer);
-
- virtual void RemoveAllocationObserver(AllocationObserver* observer);
-
- virtual void PauseAllocationObservers();
-
- virtual void ResumeAllocationObservers();
-
- virtual void StartNextInlineAllocationStep() {}
-
- void AllocationStep(int bytes_since_last, Address soon_object, int size);
-
- // Return the total amount committed memory for this space, i.e., allocatable
- // memory and page headers.
- virtual size_t CommittedMemory() { return committed_; }
-
- virtual size_t MaximumCommittedMemory() { return max_committed_; }
-
- // Returns allocated size.
- virtual size_t Size() = 0;
-
- // Returns size of objects. Can differ from the allocated size
- // (e.g. see LargeObjectSpace).
- virtual size_t SizeOfObjects() { return Size(); }
-
- // Approximate amount of physical memory committed for this space.
- virtual size_t CommittedPhysicalMemory() = 0;
-
- // Return the available bytes without growing.
- virtual size_t Available() = 0;
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (id_ == CODE_SPACE) {
- return RoundDown(size, kCodeAlignment);
- } else {
- return RoundDown(size, kTaggedSize);
- }
- }
-
- virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
-
- void AccountCommitted(size_t bytes) {
- DCHECK_GE(committed_ + bytes, committed_);
- committed_ += bytes;
- if (committed_ > max_committed_) {
- max_committed_ = committed_;
- }
- }
-
- void AccountUncommitted(size_t bytes) {
- DCHECK_GE(committed_, committed_ - bytes);
- committed_ -= bytes;
- }
-
- inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount);
-
- inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount);
-
- // Returns amount of off-heap memory in-use by objects in this Space.
- virtual size_t ExternalBackingStoreBytes(
- ExternalBackingStoreType type) const {
- return external_backing_store_bytes_[type];
- }
-
- void* GetRandomMmapAddr();
-
- MemoryChunk* first_page() { return memory_chunk_list_.front(); }
- MemoryChunk* last_page() { return memory_chunk_list_.back(); }
-
- base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-
- protected:
- intptr_t GetNextInlineAllocationStepSize();
- bool AllocationObserversActive() {
- return !allocation_observers_paused_ && !allocation_observers_.empty();
- }
-
- void DetachFromHeap() { heap_ = nullptr; }
-
- std::vector<AllocationObserver*> allocation_observers_;
-
- // The List manages the pages that belong to the given space.
- base::List<MemoryChunk> memory_chunk_list_;
-
- // Tracks off-heap memory used by this space.
- std::atomic<size_t>* external_backing_store_bytes_;
-
- private:
- static const intptr_t kIdOffset = 9 * kSystemPointerSize;
-
- bool allocation_observers_paused_;
- Heap* heap_;
- AllocationSpace id_;
-
- // Keeps track of committed memory in a space.
- size_t committed_;
- size_t max_committed_;
-
- DISALLOW_COPY_AND_ASSIGN(Space);
-};
-
-class MemoryChunkValidator {
- // Computed offsets should match the compiler generated ones.
- STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
-
- // Validate our estimates on the header size.
- STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
- STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
- STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
-};
-
+// Validate our estimates on the header size.
+STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
// The process-wide singleton that keeps track of code range regions with the
// intention to reuse free code range regions as a workaround for CFG memory
@@ -1205,7 +1223,7 @@ class MemoryAllocator {
chunk = GetMemoryChunkSafe<kRegular>();
if (chunk != nullptr) {
// For stolen chunks we need to manually free any allocated memory.
- chunk->ReleaseAllocatedMemory();
+ chunk->ReleaseAllAllocatedMemory();
}
}
return chunk;
@@ -1597,17 +1615,17 @@ class PageRange {
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
-// A HeapObjectIterator iterates objects from the bottom of the given space
-// to its top or from the bottom of the given page to its top.
+// A PagedSpaceObjectIterator iterates objects from the bottom of the given
+// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
-class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
+class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
- explicit HeapObjectIterator(PagedSpace* space);
- explicit HeapObjectIterator(Page* page);
+ explicit PagedSpaceObjectIterator(PagedSpace* space);
+ explicit PagedSpaceObjectIterator(Page* page);
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
@@ -1629,7 +1647,6 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
PageRange::iterator current_page_;
};
-
// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.
@@ -1761,13 +1778,6 @@ class AllocationStats {
#endif
};
-// A free list maintaining free blocks of memory. The free list is organized in
-// a way to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
-// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which is
-// divided up into rough categories to cut down on waste. Having finer
-// categories would scatter allocation more.
// The free list is organized in categories as follows:
// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
@@ -1782,11 +1792,9 @@ class AllocationStats {
// words in size.
// At least 16384 words (huge): This list is for objects of 2048 words or
// larger. Empty pages are also added to this list.
-class FreeList {
+class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
public:
- // This method returns how much memory can be allocated after freeing
- // maximum_freed memory.
- static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
+ size_t GuaranteedAllocatable(size_t maximum_freed) override {
if (maximum_freed <= kTiniestListMax) {
// Since we are not iterating over all list entries, we cannot guarantee
// that we can find the maximum freed block in that free list.
@@ -1803,7 +1811,50 @@ class FreeList {
return maximum_freed;
}
- static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
+ Page* GetPageForSize(size_t size_in_bytes) override {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = GetPageForCategoryType(kMedium);
+ if (!page && static_cast<int>(kSmall) >= minimum_category)
+ page = GetPageForCategoryType(kSmall);
+ if (!page && static_cast<int>(kTiny) >= minimum_category)
+ page = GetPageForCategoryType(kTiny);
+ if (!page && static_cast<int>(kTiniest) >= minimum_category)
+ page = GetPageForCategoryType(kTiniest);
+ return page;
+ }
+
+ FreeListLegacy();
+ ~FreeListLegacy();
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) override;
+
+ private:
+ enum { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };
+
+ static const size_t kMinBlockSize = 3 * kTaggedSize;
+
+ // This is a conservative upper bound. The actual maximum block size takes
+ // padding and alignment of data and code pages into account.
+ static const size_t kMaxBlockSize = Page::kPageSize;
+
+ static const size_t kTiniestListMax = 0xa * kTaggedSize;
+ static const size_t kTinyListMax = 0x1f * kTaggedSize;
+ static const size_t kSmallListMax = 0xff * kTaggedSize;
+ static const size_t kMediumListMax = 0x7ff * kTaggedSize;
+ static const size_t kLargeListMax = 0x1fff * kTaggedSize;
+ static const size_t kTinyAllocationMax = kTiniestListMax;
+ static const size_t kSmallAllocationMax = kTinyListMax;
+ static const size_t kMediumAllocationMax = kSmallListMax;
+ static const size_t kLargeAllocationMax = kMediumListMax;
+
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
if (size_in_bytes <= kTiniestListMax) {
return kTiniest;
} else if (size_in_bytes <= kTinyListMax) {
@@ -1818,152 +1869,145 @@ class FreeList {
return kHuge;
}
- FreeList();
-
- // Adds a node on the free list. The block of size {size_in_bytes} starting
- // at {start} is placed on the free list. The return value is the number of
- // bytes that were not added to the free list, because they freed memory block
- // was too small. Bookkeeping information will be written to the block, i.e.,
- // its contents will be destroyed. The start address should be word aligned,
- // and the size should be a non-zero multiple of the word size.
- size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
-
- // Allocates a free space node frome the free list of at least size_in_bytes
- // bytes. Returns the actual node size in node_size which can be bigger than
- // size_in_bytes. This method returns null if the allocation request cannot be
- // handled by the free list.
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- size_t Available() {
- size_t available = 0;
- ForAllFreeListCategories([&available](FreeListCategory* category) {
- available += category->available();
- });
- return available;
- }
-
- bool IsEmpty() {
- bool empty = true;
- ForAllFreeListCategories([&empty](FreeListCategory* category) {
- if (!category->is_empty()) empty = false;
- });
- return empty;
+ // Returns the category to be used to allocate |size_in_bytes| in the fast
+ // path. The tiny categories are not used for fast allocation.
+ FreeListCategoryType SelectFastAllocationFreeListCategoryType(
+ size_t size_in_bytes) {
+ if (size_in_bytes <= kSmallAllocationMax) {
+ return kSmall;
+ } else if (size_in_bytes <= kMediumAllocationMax) {
+ return kMedium;
+ } else if (size_in_bytes <= kLargeAllocationMax) {
+ return kLarge;
+ }
+ return kHuge;
}
- // Used after booting the VM.
- void RepairLists(Heap* heap);
-
- V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
- bool ContainsPageFreeListItems(Page* page);
-
- size_t wasted_bytes() { return wasted_bytes_; }
+ friend class FreeListCategory;
+ friend class heap::HeapTester;
+};
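
The thresholds above partition allocation sizes into six classes. As a minimal sketch of that cascade outside of V8 — assuming a 64-bit build where kTaggedSize is 8 bytes; the constants and the tiny main are illustrative, not the V8 implementation:

// Minimal sketch of the FreeListLegacy size classes (assumes kTaggedSize == 8).
#include <cstddef>
#include <cstdio>

namespace sketch {

constexpr size_t kTaggedSize = 8;  // assumption: 64-bit build
constexpr size_t kTiniestListMax = 0xa * kTaggedSize;   // 80 bytes
constexpr size_t kTinyListMax = 0x1f * kTaggedSize;     // 248 bytes
constexpr size_t kSmallListMax = 0xff * kTaggedSize;    // 2040 bytes
constexpr size_t kMediumListMax = 0x7ff * kTaggedSize;  // 16376 bytes
constexpr size_t kLargeListMax = 0x1fff * kTaggedSize;  // 65528 bytes

enum Category { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };

// Same threshold cascade as SelectFreeListCategoryType above.
Category Select(size_t size_in_bytes) {
  if (size_in_bytes <= kTiniestListMax) return kTiniest;
  if (size_in_bytes <= kTinyListMax) return kTiny;
  if (size_in_bytes <= kSmallListMax) return kSmall;
  if (size_in_bytes <= kMediumListMax) return kMedium;
  if (size_in_bytes <= kLargeListMax) return kLarge;
  return kHuge;
}

}  // namespace sketch

int main() {
  std::printf("512 bytes -> category %d\n", sketch::Select(512));  // kSmall (2)
}
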
- template <typename Callback>
- void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
- FreeListCategory* current = categories_[type];
- while (current != nullptr) {
- FreeListCategory* next = current->next();
- callback(current);
- current = next;
+// Inspired by FreeListLegacy.
+// Only has 3 categories: Medium, Large and Huge.
+// Any block that would have belonged to tiniest, tiny or small in
+// FreeListLegacy is considered wasted.
+// Allocation is done only in Huge, Medium and Large (in that order),
+// using a first-fit strategy (only the first block of each freelist is ever
+// considered though). Performance is expected to be better than
+// FreeListLegacy, but memory usage should be higher (because fragmentation
+// will probably be higher).
+class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override {
+ if (maximum_freed <= kMediumListMax) {
+ // Since we are not iterating over all list entries, we cannot guarantee
+ // that we can find the maximum freed block in that free list.
+ return 0;
+ } else if (maximum_freed <= kLargeListMax) {
+ return kLargeAllocationMax;
}
+ return kHugeAllocationMax;
}
- template <typename Callback>
- void ForAllFreeListCategories(Callback callback) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
- }
+ Page* GetPageForSize(size_t size_in_bytes) override {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = GetPageForCategoryType(kMedium);
+ return page;
}
- bool AddCategory(FreeListCategory* category);
- V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
- void PrintCategories(FreeListCategoryType type);
-
- // Returns a page containing an entry for a given type, or nullptr otherwise.
- inline Page* GetPageForCategoryType(FreeListCategoryType type);
+ FreeListFastAlloc();
+ ~FreeListFastAlloc();
-#ifdef DEBUG
- size_t SumFreeLists();
- bool IsVeryLong();
-#endif
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) override;
private:
- class FreeListCategoryIterator {
- public:
- FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
- : current_(free_list->categories_[type]) {}
+ enum { kMedium, kLarge, kHuge };
- bool HasNext() { return current_ != nullptr; }
-
- FreeListCategory* Next() {
- DCHECK(HasNext());
- FreeListCategory* tmp = current_;
- current_ = current_->next();
- return tmp;
- }
-
- private:
- FreeListCategory* current_;
- };
-
- // The size range of blocks, in bytes.
- static const size_t kMinBlockSize = 3 * kTaggedSize;
+ static const size_t kMinBlockSize = 0xff * kTaggedSize;
// This is a conservative upper bound. The actual maximum block size takes
// padding and alignment of data and code pages into account.
static const size_t kMaxBlockSize = Page::kPageSize;
- static const size_t kTiniestListMax = 0xa * kTaggedSize;
- static const size_t kTinyListMax = 0x1f * kTaggedSize;
- static const size_t kSmallListMax = 0xff * kTaggedSize;
static const size_t kMediumListMax = 0x7ff * kTaggedSize;
static const size_t kLargeListMax = 0x1fff * kTaggedSize;
- static const size_t kTinyAllocationMax = kTiniestListMax;
- static const size_t kSmallAllocationMax = kTinyListMax;
- static const size_t kMediumAllocationMax = kSmallListMax;
+ static const size_t kMediumAllocationMax = kMinBlockSize;
static const size_t kLargeAllocationMax = kMediumListMax;
+ static const size_t kHugeAllocationMax = kLargeListMax;
- // Walks all available categories for a given |type| and tries to retrieve
- // a node. Returns nullptr if the category is empty.
- FreeSpace FindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
-
- // Tries to retrieve a node from the first category in a given |type|.
- // Returns nullptr if the category is empty or the top entry is smaller
- // than minimum_size.
- FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
-
- // Searches a given |type| for a node of at least |minimum_size|.
- FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
- size_t minimum_size);
-
- // The tiny categories are not used for fast allocation.
- FreeListCategoryType SelectFastAllocationFreeListCategoryType(
- size_t size_in_bytes) {
- if (size_in_bytes <= kSmallAllocationMax) {
- return kSmall;
- } else if (size_in_bytes <= kMediumAllocationMax) {
+ // Returns the category used to hold an object of size |size_in_bytes|.
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ if (size_in_bytes <= kMediumListMax) {
return kMedium;
- } else if (size_in_bytes <= kLargeAllocationMax) {
+ } else if (size_in_bytes <= kLargeListMax) {
return kLarge;
}
return kHuge;
}
- FreeListCategory* top(FreeListCategoryType type) const {
- return categories_[type];
+ Page* GetPageForCategoryType(FreeListCategoryType type) {
+ return top(type) ? top(type)->page() : nullptr;
}
+};
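
GuaranteedAllocatable above turns the largest freed block into a conservative bound on what the space can promise to allocate next. A standalone restatement of that mapping — again assuming kTaggedSize == 8, with names kept only for readability and not taken from the V8 headers beyond what the diff shows:

// Standalone restatement of the GuaranteedAllocatable logic shown above
// (illustrative only; assumes kTaggedSize == 8 as on a 64-bit build).
#include <cstddef>
#include <cstdio>

constexpr size_t kTaggedSize = 8;
constexpr size_t kMediumListMax = 0x7ff * kTaggedSize;   // 16376 bytes
constexpr size_t kLargeListMax = 0x1fff * kTaggedSize;   // 65528 bytes
constexpr size_t kLargeAllocationMax = kMediumListMax;
constexpr size_t kHugeAllocationMax = kLargeListMax;

// Only the head of each category is ever examined, so freed blocks at or
// below the medium list maximum cannot be guaranteed to be reusable.
size_t GuaranteedAllocatable(size_t maximum_freed) {
  if (maximum_freed <= kMediumListMax) return 0;
  if (maximum_freed <= kLargeListMax) return kLargeAllocationMax;
  return kHugeAllocationMax;
}

int main() {
  std::printf("%zu %zu\n", GuaranteedAllocatable(8 * 1024),
              GuaranteedAllocatable(32 * 1024));  // prints "0 16376"
}
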
- std::atomic<size_t> wasted_bytes_;
- FreeListCategory* categories_[kNumberOfCategories];
+// Uses 49 free lists: one per size between 24 and 256, and then a few more
+// for larger sizes. See the variable |categories_max| for the upper size
+// bound of each free list. Allocation is done using a best-fit strategy
+// (considering only the first element of each category though).
+// Performance is expected to be worse than FreeListLegacy, but memory
+// consumption should be lower (since fragmentation should be lower).
+class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override;
- friend class FreeListCategory;
+ Page* GetPageForSize(size_t size_in_bytes) override;
+
+ FreeListMany();
+ ~FreeListMany();
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) override;
+
+ private:
+ static const size_t kMinBlockSize = 3 * kTaggedSize;
+
+ // This is a conservative upper bound. The actual maximum block size takes
+ // padding and alignment of data and code pages into account.
+ static const size_t kMaxBlockSize = Page::kPageSize;
+
+ // Categories boundaries generated with:
+ // perl -E '
+ // @cat = map {$_*8} 3..32, 48, 64;
+ // while ($cat[-1] <= 32768) {
+ // push @cat, $cat[-1]+$cat[-3], $cat[-1]*2
+ // }
+ // push @cat, 4080, 4088;
+ // @cat = sort { $a <=> $b } @cat;
+ // push @cat, "Page::kPageSize";
+ // say join ", ", @cat;
+ // say "\n", scalar @cat'
+  // Note the special case for 4080 and 4088 bytes: experiments have shown
+  // that these size classes are used more often than others of similar sizes.
+ static const int kNumberOfCategories = 49;
+ static const size_t categories_max[kNumberOfCategories];
+
+ // Return the smallest category that could hold |size_in_bytes| bytes.
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ for (int cat = kFirstCategory; cat < last_category_; cat++) {
+ if (size_in_bytes <= categories_max[cat]) {
+ return cat;
+ }
+ }
+ return last_category_;
+ }
};
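
The perl one-liner quoted in the comment is the recipe that produces the 49 boundaries stored in categories_max. A best-effort C++ rendering of the same recipe is sketched below; the trailing Page::kPageSize entry is left symbolic because its value depends on the build configuration:

// Reconstructs the category boundaries described by the perl script above.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // @cat = map {$_*8} 3..32, 48, 64;
  std::vector<long> cat;
  for (long i = 3; i <= 32; i++) cat.push_back(i * 8);
  cat.push_back(48 * 8);
  cat.push_back(64 * 8);
  // while ($cat[-1] <= 32768) { push @cat, $cat[-1]+$cat[-3], $cat[-1]*2 }
  while (cat.back() <= 32768) {
    long last = cat.back();
    long third_last = cat[cat.size() - 3];
    cat.push_back(last + third_last);
    cat.push_back(last * 2);
  }
  // push @cat, 4080, 4088; then sort numerically.
  cat.push_back(4080);
  cat.push_back(4088);
  std::sort(cat.begin(), cat.end());
  for (long c : cat) std::printf("%ld ", c);
  std::printf("Page::kPageSize\n");  // the 49th boundary is the page size itself
}
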
// LocalAllocationBuffer represents a linear allocation area that is created
@@ -2029,8 +2073,8 @@ class LocalAllocationBuffer {
class SpaceWithLinearArea : public Space {
public:
- SpaceWithLinearArea(Heap* heap, AllocationSpace id)
- : Space(heap, id), top_on_previous_step_(0) {
+ SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
+ : Space(heap, id, free_list), top_on_previous_step_(0) {
allocation_info_.Reset(kNullAddress, kNullAddress);
}
@@ -2091,7 +2135,8 @@ class V8_EXPORT_PRIVATE PagedSpace
static const size_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
- PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
+ PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
+ FreeList* free_list);
~PagedSpace() override { TearDown(); }
@@ -2119,14 +2164,14 @@ class V8_EXPORT_PRIVATE PagedSpace
// to the available and wasted totals. The free list is cleared as well.
void ClearAllocatorState() {
accounting_stats_.ClearSize();
- free_list_.Reset();
+ free_list_->Reset();
}
// Available bytes without growing. These are the bytes on the free list.
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
- size_t Available() override { return free_list_.Available(); }
+ size_t Available() override { return free_list_->Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
@@ -2140,7 +2185,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation.
- virtual size_t Waste() { return free_list_.wasted_bytes(); }
+ virtual size_t Waste() { return free_list_->wasted_bytes(); }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
@@ -2173,7 +2218,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
size_t AccountedFree(Address start, size_t size_in_bytes) {
- size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
+ size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
DCHECK_GE(size_in_bytes, wasted);
@@ -2181,7 +2226,7 @@ class V8_EXPORT_PRIVATE PagedSpace
}
size_t UnaccountedFree(Address start, size_t size_in_bytes) {
- size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+ size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
@@ -2211,7 +2256,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void RefineAllocatedBytesAfterSweeping(Page* page);
- Page* InitializePage(MemoryChunk* chunk, Executability executable);
+ Page* InitializePage(MemoryChunk* chunk);
void ReleasePage(Page* page);
@@ -2275,8 +2320,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// sweeper.
virtual void RefillFreeList();
- FreeList* free_list() { return &free_list_; }
-
base::Mutex* mutex() { return &space_mutex_; }
inline void UnlinkFreeListCategories(Page* page);
@@ -2368,9 +2411,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// Accounting information for this space.
AllocationStats accounting_stats_;
- // The space's free list.
- FreeList free_list_;
-
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
@@ -2396,7 +2436,7 @@ class SemiSpace : public Space {
static void Swap(SemiSpace* from, SemiSpace* to);
SemiSpace(Heap* heap, SemiSpaceId semispace)
- : Space(heap, NEW_SPACE),
+ : Space(heap, NEW_SPACE, new NoFreeList()),
current_capacity_(0),
maximum_capacity_(0),
minimum_capacity_(0),
@@ -2465,7 +2505,7 @@ class SemiSpace : public Space {
void RemovePage(Page* page);
void PrependPage(Page* page);
- Page* InitializePage(MemoryChunk* chunk, Executability executable);
+ Page* InitializePage(MemoryChunk* chunk);
// Age mark accessors.
Address age_mark() { return age_mark_; }
@@ -2552,19 +2592,18 @@ class SemiSpace : public Space {
int pages_used_;
friend class NewSpace;
- friend class SemiSpaceIterator;
+ friend class SemiSpaceObjectIterator;
};
-
-// A SemiSpaceIterator is an ObjectIterator that iterates over the active
+// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
-class SemiSpaceIterator : public ObjectIterator {
+class SemiSpaceObjectIterator : public ObjectIterator {
public:
// Create an iterator over the allocated objects in the given to-space.
- explicit SemiSpaceIterator(NewSpace* space);
+ explicit SemiSpaceObjectIterator(NewSpace* space);
inline HeapObject Next() override;
@@ -2821,7 +2860,7 @@ class V8_EXPORT_PRIVATE NewSpace
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool SupportsInlineAllocation() override { return true; }
- friend class SemiSpaceIterator;
+ friend class SemiSpaceObjectIterator;
};
class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
@@ -2840,7 +2879,7 @@ class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
public:
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
- : PagedSpace(heap, id, executable) {}
+ : PagedSpace(heap, id, executable, FreeList::CreateFreeList()) {}
bool is_local() override { return true; }
@@ -2886,7 +2925,9 @@ class OldSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
- explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
+ explicit OldSpace(Heap* heap)
+ : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList()) {}
static bool IsAtPageStart(Address addr) {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
@@ -2901,7 +2942,8 @@ class CodeSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
- explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
+ explicit CodeSpace(Heap* heap)
+ : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
};
// For contiguous spaces, top should be in the space (or at the end) and limit
@@ -2918,7 +2960,9 @@ class CodeSpace : public PagedSpace {
class MapSpace : public PagedSpace {
public:
// Creates a map space object.
- explicit MapSpace(Heap* heap) : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE) {}
+ explicit MapSpace(Heap* heap)
+ : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList()) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
@@ -2946,6 +2990,9 @@ class ReadOnlySpace : public PagedSpace {
bool writable() const { return !is_marked_read_only_; }
+ bool Contains(Address a) = delete;
+ bool Contains(Object o) = delete;
+
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
@@ -3056,7 +3103,7 @@ class LargeObjectSpace : public Space {
size_t objects_size_; // size of objects
private:
- friend class LargeObjectIterator;
+ friend class LargeObjectSpaceObjectIterator;
};
class NewLargeObjectSpace : public LargeObjectSpace {
@@ -3112,9 +3159,9 @@ class CodeLargeObjectSpace : public LargeObjectSpace {
std::unordered_map<Address, LargePage*> chunk_map_;
};
-class LargeObjectIterator : public ObjectIterator {
+class LargeObjectSpaceObjectIterator : public ObjectIterator {
public:
- explicit LargeObjectIterator(LargeObjectSpace* space);
+ explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);
HeapObject Next() override;
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index e59e72d3a6..7d0dcfc370 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -104,16 +104,7 @@ void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
Address start, Address end) {
- // In GC the store buffer has to be empty at any time.
- DCHECK(store_buffer->Empty());
- DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
- Page* page = Page::FromAddress(start);
- if (end) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
- } else {
- RememberedSet<OLD_TO_NEW>::Remove(page, start);
- }
+ UNREACHABLE();
}
void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
diff --git a/deps/v8/src/heap/stress-marking-observer.cc b/deps/v8/src/heap/stress-marking-observer.cc
index 091f279a78..bb7720e118 100644
--- a/deps/v8/src/heap/stress-marking-observer.cc
+++ b/deps/v8/src/heap/stress-marking-observer.cc
@@ -9,14 +9,14 @@ namespace v8 {
namespace internal {
// TODO(majeski): meaningful step_size
-StressMarkingObserver::StressMarkingObserver(Heap& heap)
+StressMarkingObserver::StressMarkingObserver(Heap* heap)
: AllocationObserver(64), heap_(heap) {}
void StressMarkingObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
- heap_.StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
- kNoGCCallbackFlags);
- heap_.incremental_marking()->EnsureBlackAllocated(soon_object, size);
+ heap_->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
+ kNoGCCallbackFlags);
+ heap_->incremental_marking()->EnsureBlackAllocated(soon_object, size);
}
} // namespace internal
diff --git a/deps/v8/src/heap/stress-marking-observer.h b/deps/v8/src/heap/stress-marking-observer.h
index 37ebb82197..5736ba9289 100644
--- a/deps/v8/src/heap/stress-marking-observer.h
+++ b/deps/v8/src/heap/stress-marking-observer.h
@@ -12,12 +12,12 @@ namespace internal {
class StressMarkingObserver : public AllocationObserver {
public:
- explicit StressMarkingObserver(Heap& heap);
+ explicit StressMarkingObserver(Heap* heap);
void Step(int bytes_allocated, Address soon_object, size_t size) override;
private:
- Heap& heap_;
+ Heap* heap_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/stress-scavenge-observer.cc b/deps/v8/src/heap/stress-scavenge-observer.cc
index b91825c38b..5aa3419ed7 100644
--- a/deps/v8/src/heap/stress-scavenge-observer.cc
+++ b/deps/v8/src/heap/stress-scavenge-observer.cc
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
// TODO(majeski): meaningful step_size
-StressScavengeObserver::StressScavengeObserver(Heap& heap)
+StressScavengeObserver::StressScavengeObserver(Heap* heap)
: AllocationObserver(64),
heap_(heap),
has_requested_gc_(false),
@@ -21,22 +21,22 @@ StressScavengeObserver::StressScavengeObserver(Heap& heap)
limit_percentage_ = NextLimit();
if (FLAG_trace_stress_scavenge && !FLAG_fuzzer_gc_analysis) {
- heap_.isolate()->PrintWithTimestamp(
+ heap_->isolate()->PrintWithTimestamp(
"[StressScavenge] %d%% is the new limit\n", limit_percentage_);
}
}
void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
- if (has_requested_gc_ || heap_.new_space()->Capacity() == 0) {
+ if (has_requested_gc_ || heap_->new_space()->Capacity() == 0) {
return;
}
double current_percent =
- heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+ heap_->new_space()->Size() * 100.0 / heap_->new_space()->Capacity();
if (FLAG_trace_stress_scavenge) {
- heap_.isolate()->PrintWithTimestamp(
+ heap_->isolate()->PrintWithTimestamp(
"[Scavenge] %.2lf%% of the new space capacity reached\n",
current_percent);
}
@@ -49,11 +49,11 @@ void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
if (static_cast<int>(current_percent) >= limit_percentage_) {
if (FLAG_trace_stress_scavenge) {
- heap_.isolate()->PrintWithTimestamp("[Scavenge] GC requested\n");
+ heap_->isolate()->PrintWithTimestamp("[Scavenge] GC requested\n");
}
has_requested_gc_ = true;
- heap_.isolate()->stack_guard()->RequestGC();
+ heap_->isolate()->stack_guard()->RequestGC();
}
}
@@ -63,15 +63,15 @@ bool StressScavengeObserver::HasRequestedGC() const {
void StressScavengeObserver::RequestedGCDone() {
double current_percent =
- heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+ heap_->new_space()->Size() * 100.0 / heap_->new_space()->Capacity();
limit_percentage_ = NextLimit(static_cast<int>(current_percent));
if (FLAG_trace_stress_scavenge) {
- heap_.isolate()->PrintWithTimestamp(
+ heap_->isolate()->PrintWithTimestamp(
"[Scavenge] %.2lf%% of the new space capacity reached\n",
current_percent);
- heap_.isolate()->PrintWithTimestamp("[Scavenge] %d%% is the new limit\n",
- limit_percentage_);
+ heap_->isolate()->PrintWithTimestamp("[Scavenge] %d%% is the new limit\n",
+ limit_percentage_);
}
has_requested_gc_ = false;
@@ -87,7 +87,7 @@ int StressScavengeObserver::NextLimit(int min) {
return max;
}
- return min + heap_.isolate()->fuzzer_rng()->NextInt(max - min + 1);
+ return min + heap_->isolate()->fuzzer_rng()->NextInt(max - min + 1);
}
} // namespace internal
diff --git a/deps/v8/src/heap/stress-scavenge-observer.h b/deps/v8/src/heap/stress-scavenge-observer.h
index b39b2eac59..4996323b75 100644
--- a/deps/v8/src/heap/stress-scavenge-observer.h
+++ b/deps/v8/src/heap/stress-scavenge-observer.h
@@ -12,7 +12,7 @@ namespace internal {
class StressScavengeObserver : public AllocationObserver {
public:
- explicit StressScavengeObserver(Heap& heap);
+ explicit StressScavengeObserver(Heap* heap);
void Step(int bytes_allocated, Address soon_object, size_t size) override;
@@ -24,7 +24,7 @@ class StressScavengeObserver : public AllocationObserver {
double MaxNewSpaceSizeReached() const;
private:
- Heap& heap_;
+ Heap* heap_;
int limit_percentage_;
bool has_requested_gc_;
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 8f7b55bf2b..cbb7d717b0 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -184,7 +184,7 @@ void Sweeper::StartSweeperTasks() {
void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
if (!page->SweepingDone()) {
- ParallelSweepPage(page, page->owner()->identity());
+ ParallelSweepPage(page, page->owner_identity());
if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
@@ -370,7 +370,9 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
p->set_concurrent_sweeping_state(Page::kSweepingDone);
if (code_object_registry) code_object_registry->Finalize();
if (free_list_mode == IGNORE_FREE_LIST) return 0;
- return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
+
+ return static_cast<int>(
+ p->free_list()->GuaranteedAllocatable(max_freed_bytes));
}
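
The sweeper now asks the swept page's own free list for GuaranteedAllocatable instead of calling a static FreeList method, which is what lets FreeListLegacy, FreeListFastAlloc and FreeListMany apply different bounds. A minimal sketch of that dispatch pattern, with toy class names and thresholds that are not the V8 ones:

// Sketch only: a pluggable free-list interface, not the V8 classes.
#include <cstddef>
#include <cstdio>
#include <memory>

class FreeListSketch {
 public:
  virtual ~FreeListSketch() = default;
  virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;
};

class LegacyLike : public FreeListSketch {
 public:
  size_t GuaranteedAllocatable(size_t maximum_freed) override {
    return maximum_freed <= 80 ? 0 : maximum_freed;  // toy threshold
  }
};

class FastAllocLike : public FreeListSketch {
 public:
  size_t GuaranteedAllocatable(size_t maximum_freed) override {
    return maximum_freed <= 16376 ? 0 : maximum_freed;  // toy threshold
  }
};

int main() {
  std::unique_ptr<FreeListSketch> fl = std::make_unique<FastAllocLike>();
  // A sweeper-like caller no longer needs to know which policy is in use.
  std::printf("%zu\n", fl->GuaranteedAllocatable(4096));  // 0 under FastAllocLike
}
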
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
@@ -500,7 +502,7 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
}
void Sweeper::EnsurePageIsIterable(Page* page) {
- AllocationSpace space = page->owner()->identity();
+ AllocationSpace space = page->owner_identity();
if (IsValidSweepingSpace(space)) {
SweepOrWaitUntilSweepingCompleted(page);
} else {
@@ -573,7 +575,7 @@ void Sweeper::AddPageForIterability(Page* page) {
DCHECK(sweeping_in_progress_);
DCHECK(iterability_in_progress_);
DCHECK(!iterability_task_started_);
- DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+ DCHECK(IsValidIterabilitySpace(page->owner_identity()));
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
iterability_list_.push_back(page);
@@ -581,7 +583,7 @@ void Sweeper::AddPageForIterability(Page* page) {
}
void Sweeper::MakeIterable(Page* page) {
- DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+ DCHECK(IsValidIterabilitySpace(page->owner_identity()));
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index fa1291f6f3..51788b41e4 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
bmeurer@chromium.org
ishell@chromium.org
jkummerow@chromium.org
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 98c86c2263..7aebf857a2 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -107,12 +107,12 @@ void AccessorAssembler::HandlePolymorphicCase(
// Load the {feedback} array length.
TNode<IntPtrT> length = LoadAndUntagWeakFixedArrayLength(feedback);
- CSA_ASSERT(this, IntPtrLessThanOrEqual(IntPtrConstant(1), length));
+ CSA_ASSERT(this, IntPtrLessThanOrEqual(IntPtrConstant(kEntrySize), length));
- // This is a hand-crafted loop that only compares against the {length}
- // in the end, since we already know that we will have at least a single
- // entry in the {feedback} array anyways.
- TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
+ // This is a hand-crafted loop that iterates backwards and only compares
+ // against zero at the end, since we already know that we will have at least a
+  // single entry in the {feedback} array anyway.
+ TVARIABLE(IntPtrT, var_index, IntPtrSub(length, IntPtrConstant(kEntrySize)));
Label loop(this, &var_index), loop_next(this);
Goto(&loop);
BIND(&loop);
@@ -131,18 +131,19 @@ void AccessorAssembler::HandlePolymorphicCase(
BIND(&loop_next);
var_index =
- Signed(IntPtrAdd(var_index.value(), IntPtrConstant(kEntrySize)));
- Branch(IntPtrLessThan(var_index.value(), length), &loop, if_miss);
+ Signed(IntPtrSub(var_index.value(), IntPtrConstant(kEntrySize)));
+ Branch(IntPtrGreaterThanOrEqual(var_index.value(), IntPtrConstant(0)),
+ &loop, if_miss);
}
}
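
The rewritten loop walks the {feedback} array from its last entry down to index zero in kEntrySize steps, so the per-iteration bound check is a single comparison against zero. A plain C++ analogue of that traversal, with a placeholder kEntrySize and placeholder array contents:

// Backwards traversal in kEntrySize steps, mirroring the CSA loop above.
#include <cstdio>

int main() {
  constexpr int kEntrySize = 2;               // placeholder; V8 uses its own constant
  int feedback[] = {10, 11, 20, 21, 30, 31};  // placeholder (map, handler) pairs
  int length = sizeof(feedback) / sizeof(feedback[0]);
  // Start at the last entry and step backwards; the loop condition only
  // compares the index against zero.
  for (int index = length - kEntrySize; index >= 0; index -= kEntrySize) {
    std::printf("entry at %d: (%d, %d)\n", index, feedback[index],
                feedback[index + 1]);
  }
}
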
void AccessorAssembler::HandleLoadICHandlerCase(
- const LoadICParameters* p, TNode<Object> handler, Label* miss,
+ const LazyLoadICParameters* p, TNode<Object> handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent,
ElementSupport support_elements, LoadAccessMode access_mode) {
Comment("have_handler");
- VARIABLE(var_holder, MachineRepresentation::kTagged, p->holder);
+ VARIABLE(var_holder, MachineRepresentation::kTagged, p->holder());
VARIABLE(var_smi_handler, MachineRepresentation::kTagged, handler);
Variable* vars[] = {&var_holder, &var_smi_handler};
@@ -152,6 +153,14 @@ void AccessorAssembler::HandleLoadICHandlerCase(
Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
+ BIND(&try_proto_handler);
+ {
+ GotoIf(IsCodeMap(LoadMap(CAST(handler))), &call_handler);
+ HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
+ &if_smi_handler, miss, exit_point, ic_mode,
+ access_mode);
+ }
+
// |handler| is a Smi, encoding what to do. See SmiHandler methods
// for the encoding format.
BIND(&if_smi_handler);
@@ -161,49 +170,34 @@ void AccessorAssembler::HandleLoadICHandlerCase(
support_elements, access_mode);
}
- BIND(&try_proto_handler);
- {
- GotoIf(IsCodeMap(LoadMap(CAST(handler))), &call_handler);
- HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
- &if_smi_handler, miss, exit_point, ic_mode,
- access_mode);
- }
-
BIND(&call_handler);
{
- exit_point->ReturnCallStub(LoadWithVectorDescriptor{}, handler, p->context,
- p->receiver, p->name, p->slot, p->vector);
+ exit_point->ReturnCallStub(LoadWithVectorDescriptor{}, handler,
+ p->context(), p->receiver(), p->name(),
+ p->slot(), p->vector());
}
}
-void AccessorAssembler::HandleLoadCallbackProperty(const LoadICParameters* p,
- TNode<JSObject> holder,
- TNode<WordT> handler_word,
- ExitPoint* exit_point) {
+void AccessorAssembler::HandleLoadCallbackProperty(
+ const LazyLoadICParameters* p, TNode<JSObject> holder,
+ TNode<WordT> handler_word, ExitPoint* exit_point) {
Comment("native_data_property_load");
TNode<IntPtrT> descriptor =
Signed(DecodeWord<LoadHandler::DescriptorBits>(handler_word));
- Label runtime(this, Label::kDeferred);
Callable callable = CodeFactory::ApiGetter(isolate());
TNode<AccessorInfo> accessor_info =
CAST(LoadDescriptorValue(LoadMap(holder), descriptor));
- GotoIf(IsRuntimeCallStatsEnabled(), &runtime);
- exit_point->ReturnCallStub(callable, p->context, p->receiver, holder,
+ exit_point->ReturnCallStub(callable, p->context(), p->receiver(), holder,
accessor_info);
-
- BIND(&runtime);
- exit_point->ReturnCallRuntime(Runtime::kLoadCallbackProperty, p->context,
- p->receiver, holder, accessor_info, p->name);
}
void AccessorAssembler::HandleLoadAccessor(
- const LoadICParameters* p, TNode<CallHandlerInfo> call_handler_info,
+ const LazyLoadICParameters* p, TNode<CallHandlerInfo> call_handler_info,
TNode<WordT> handler_word, TNode<DataHandler> handler,
TNode<IntPtrT> handler_kind, ExitPoint* exit_point) {
Comment("api_getter");
- Label runtime(this, Label::kDeferred);
// Context is stored either in data2 or data3 field depending on whether
// the access check is enabled for this handler or not.
TNode<MaybeObject> maybe_context = Select<MaybeObject>(
@@ -215,39 +209,31 @@ void AccessorAssembler::HandleLoadAccessor(
CSA_CHECK(this, IsNotCleared(maybe_context));
TNode<Object> context = GetHeapObjectAssumeWeak(maybe_context);
- GotoIf(IsRuntimeCallStatsEnabled(), &runtime);
- {
- TNode<Foreign> foreign = CAST(
- LoadObjectField(call_handler_info, CallHandlerInfo::kJsCallbackOffset));
- TNode<WordT> callback = TNode<WordT>::UncheckedCast(LoadObjectField(
- foreign, Foreign::kForeignAddressOffset, MachineType::Pointer()));
- TNode<Object> data =
- LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
-
- VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver);
- Label load(this);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
- &load);
+ TNode<Foreign> foreign = CAST(
+ LoadObjectField(call_handler_info, CallHandlerInfo::kJsCallbackOffset));
+ TNode<WordT> callback = TNode<WordT>::UncheckedCast(LoadObjectField(
+ foreign, Foreign::kForeignAddressOffset, MachineType::Pointer()));
+ TNode<Object> data =
+ LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
- CSA_ASSERT(
- this,
- WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)));
+ VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver());
+ Label load(this);
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
+ &load);
- api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver)));
- Goto(&load);
+ CSA_ASSERT(
+ this,
+ WordEqual(handler_kind,
+ IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)));
- BIND(&load);
- Callable callable = CodeFactory::CallApiCallback(isolate());
- TNode<IntPtrT> argc = IntPtrConstant(0);
- exit_point->Return(CallStub(callable, context, callback, argc, data,
- api_holder.value(), p->receiver));
- }
+ api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver())));
+ Goto(&load);
- BIND(&runtime);
- exit_point->ReturnCallRuntime(Runtime::kLoadAccessorProperty, context,
- p->receiver, SmiTag(handler_kind),
- call_handler_info);
+ BIND(&load);
+ Callable callable = CodeFactory::CallApiCallback(isolate());
+ TNode<IntPtrT> argc = IntPtrConstant(0);
+ exit_point->Return(CallStub(callable, context, callback, argc, data,
+ api_holder.value(), p->receiver()));
}
void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
@@ -305,7 +291,7 @@ TNode<MaybeObject> AccessorAssembler::LoadDescriptorValueOrFieldType(
}
void AccessorAssembler::HandleLoadICSmiHandlerCase(
- const LoadICParameters* p, Node* holder, SloppyTNode<Smi> smi_handler,
+ const LazyLoadICParameters* p, Node* holder, SloppyTNode<Smi> smi_handler,
SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point,
OnNonExistent on_nonexistent, ElementSupport support_elements,
LoadAccessMode access_mode) {
@@ -333,7 +319,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&if_element);
Comment("element_load");
- Node* intptr_index = TryToIntptr(p->name, miss);
+ Node* intptr_index = TryToIntptr(p->name(), miss);
Node* is_jsarray_condition =
IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
Node* elements_kind =
@@ -399,7 +385,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Label if_oob(this, Label::kDeferred);
Comment("indexed string");
- Node* intptr_index = TryToIntptr(p->name, miss);
+ Node* intptr_index = TryToIntptr(p->name(), miss);
Node* length = LoadStringLengthAsWord(holder);
GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob);
TNode<Int32T> code = StringCharCodeAt(holder, intptr_index);
@@ -430,7 +416,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
}
void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
- const LoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
+ const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
TNode<WordT> handler_word, Label* rebox_double, Variable* var_double_value,
SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point,
OnNonExistent on_nonexistent, ElementSupport support_elements) {
@@ -438,7 +424,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
interceptor(this, Label::kDeferred), nonexistent(this),
accessor(this, Label::kDeferred), global(this, Label::kDeferred),
module_export(this, Label::kDeferred), proxy(this, Label::kDeferred),
- native_data_property(this), api_getter(this);
+ native_data_property(this, Label::kDeferred),
+ api_getter(this, Label::kDeferred);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)), &field);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kConstant)),
@@ -479,8 +466,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
BIND(&nonexistent);
// This is a handler for a load of a non-existent value.
if (on_nonexistent == OnNonExistent::kThrowReferenceError) {
- exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context,
- p->name);
+ exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context(),
+ p->name());
} else {
DCHECK_EQ(OnNonExistent::kReturnUndefined, on_nonexistent);
exit_point->Return(UndefinedConstant());
@@ -502,7 +489,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder));
TVARIABLE(IntPtrT, var_name_index);
Label found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name), &found,
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()), &found,
&var_name_index, miss);
BIND(&found);
{
@@ -511,7 +498,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
LoadPropertyFromNameDictionary(properties, var_name_index.value(),
&var_details, &var_value);
Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, p->receiver, miss);
+ p->context(), p->receiver(), miss);
exit_point->Return(value);
}
}
@@ -527,7 +514,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(getter)));
Callable callable = CodeFactory::Call(isolate());
- exit_point->Return(CallJS(callable, p->context, getter, p->receiver));
+ exit_point->Return(CallJS(callable, p->context(), getter, p->receiver()));
}
BIND(&native_data_property);
@@ -548,13 +535,13 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
if (support_elements == kSupportElements) {
DCHECK_NE(on_nonexistent, OnNonExistent::kThrowReferenceError);
- TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+ TryToName(p->name(), &if_index, &var_index, &if_unique_name, &var_unique,
&to_name_failed);
BIND(&if_unique_name);
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
- p->context, holder, var_unique.value(), p->receiver,
+ p->context(), holder, var_unique.value(), p->receiver(),
SmiConstant(on_nonexistent));
BIND(&if_index);
@@ -563,13 +550,15 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
Goto(&to_name_failed);
BIND(&to_name_failed);
+ // TODO(duongn): use GetPropertyWithReceiver builtin once
+ // |lookup_element_in_holder| supports elements.
exit_point->ReturnCallRuntime(Runtime::kGetPropertyWithReceiver,
- p->context, holder, p->name, p->receiver,
- SmiConstant(on_nonexistent));
+ p->context(), holder, p->name(),
+ p->receiver(), SmiConstant(on_nonexistent));
} else {
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
- p->context, holder, p->name, p->receiver,
+ p->context(), holder, p->name(), p->receiver(),
SmiConstant(on_nonexistent));
}
}
@@ -583,16 +572,16 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
holder, PropertyCell::kPropertyDetailsRawOffset);
GotoIf(IsTheHole(value), miss);
- exit_point->Return(
- CallGetterIfAccessor(value, details, p->context, p->receiver, miss));
+ exit_point->Return(CallGetterIfAccessor(value, details, p->context(),
+ p->receiver(), miss));
}
BIND(&interceptor);
{
Comment("load_interceptor");
exit_point->ReturnCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- p->context, p->name, p->receiver, holder,
- p->slot, p->vector);
+ p->context(), p->name(), p->receiver(),
+ holder, p->slot(), p->vector());
}
BIND(&module_export);
@@ -600,7 +589,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
Comment("module export");
Node* index = DecodeWord<LoadHandler::ExportsIndexBits>(handler_word);
Node* module =
- LoadObjectField(p->receiver, JSModuleNamespace::kModuleOffset,
+ LoadObjectField(p->receiver(), JSModuleNamespace::kModuleOffset,
MachineType::TaggedPointer());
TNode<ObjectHashTable> exports = CAST(LoadObjectField(
module, Module::kExportsOffset, MachineType::TaggedPointer()));
@@ -615,8 +604,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
BIND(&is_the_hole);
{
Node* message = SmiConstant(MessageTemplate::kNotDefined);
- exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context,
- message, p->name);
+ exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context(),
+ message, p->name());
}
}
@@ -625,7 +614,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
}
void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
- const LoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
+ const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
Label* miss, ExitPoint* exit_point) {
Label return_true(this), return_false(this), return_lookup(this),
normal(this), global(this);
@@ -676,8 +665,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
WordEqual(handler_kind,
IntPtrConstant(LoadHandler::kModuleExport)))));
exit_point->ReturnCallStub(
- Builtins::CallableFor(isolate(), Builtins::kHasProperty), p->context,
- p->receiver, p->name);
+ Builtins::CallableFor(isolate(), Builtins::kHasProperty), p->context(),
+ p->receiver(), p->name());
}
BIND(&normal);
@@ -686,7 +675,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder));
TVARIABLE(IntPtrT, var_name_index);
Label found(this);
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name), &found,
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()), &found,
&var_name_index, miss);
BIND(&found);
@@ -780,8 +769,8 @@ Node* AccessorAssembler::HandleProtoHandler(
CSA_ASSERT(this, IsWeakOrCleared(data2));
TNode<Object> expected_native_context =
GetHeapObjectAssumeWeak(data2, miss);
- EmitAccessCheck(expected_native_context, p->context, p->receiver, &done,
- miss);
+ EmitAccessCheck(expected_native_context, p->context(), p->receiver(),
+ &done, miss);
}
// Dictionary lookup on receiver is not necessary for Load/StoreGlobalIC
@@ -791,14 +780,14 @@ Node* AccessorAssembler::HandleProtoHandler(
{
DCHECK_EQ(ICMode::kNonGlobalIC, ic_mode);
CSA_ASSERT(this, Word32BinaryNot(HasInstanceType(
- p->receiver, JS_GLOBAL_OBJECT_TYPE)));
+ p->receiver(), JS_GLOBAL_OBJECT_TYPE)));
TNode<NameDictionary> properties =
- CAST(LoadSlowProperties(p->receiver));
+ CAST(LoadSlowProperties(p->receiver()));
TVARIABLE(IntPtrT, var_name_index);
Label found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name), &found,
- &var_name_index, &done);
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()),
+ &found, &var_name_index, &done);
BIND(&found);
{
if (on_found_on_receiver) {
@@ -816,7 +805,7 @@ Node* AccessorAssembler::HandleProtoHandler(
}
void AccessorAssembler::HandleLoadICProtoHandler(
- const LoadICParameters* p, Node* handler, Variable* var_holder,
+ const LazyLoadICParameters* p, Node* handler, Variable* var_holder,
Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode, LoadAccessMode access_mode) {
DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
@@ -837,7 +826,7 @@ void AccessorAssembler::HandleLoadICProtoHandler(
&var_value);
Node* value =
CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, p->receiver, miss);
+ p->context(), p->receiver(), miss);
exit_point->Return(value);
}
},
@@ -910,8 +899,8 @@ void AccessorAssembler::HandleStoreICNativeDataProperty(
Node* accessor_info = LoadDescriptorValue(LoadMap(holder), descriptor);
CSA_CHECK(this, IsAccessorInfo(accessor_info));
- TailCallRuntime(Runtime::kStoreCallbackProperty, p->context, p->receiver,
- holder, accessor_info, p->name, p->value);
+ TailCallRuntime(Runtime::kStoreCallbackProperty, p->context(), p->receiver(),
+ holder, accessor_info, p->name(), p->value());
}
void AccessorAssembler::HandleStoreICHandlerCase(
@@ -927,7 +916,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
// for the encoding format.
BIND(&if_smi_handler);
{
- Node* holder = p->receiver;
+ Node* holder = p->receiver();
Node* handler_word = SmiUntag(CAST(handler));
Label if_fast_smi(this), if_proxy(this);
@@ -949,7 +938,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index);
NameDictionaryLookup<NameDictionary>(
- properties, CAST(p->name), &dictionary_found, &var_name_index, miss);
+ properties, CAST(p->name()), &dictionary_found, &var_name_index, miss);
BIND(&dictionary_found);
{
Node* details = LoadDetailsByKeyIndex<NameDictionary>(
@@ -961,8 +950,8 @@ void AccessorAssembler::HandleStoreICHandlerCase(
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
- p->value);
- Return(p->value);
+ p->value());
+ Return(p->value());
}
BIND(&if_fast_smi);
@@ -984,7 +973,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&data);
// Handle non-transitioning field stores.
- HandleStoreICSmiHandlerCase(handler_word, holder, p->value, miss);
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value(), miss);
}
BIND(&if_proxy);
@@ -1008,8 +997,8 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&call_handler);
{
TailCallStub(StoreWithVectorDescriptor{}, CAST(strong_handler),
- CAST(p->context), p->receiver, p->name, p->value, p->slot,
- p->vector);
+ p->context(), p->receiver(), p->name(), p->value(),
+ p->slot(), p->vector());
}
}
@@ -1027,7 +1016,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
{
TNode<PropertyCell> property_cell = CAST(map_or_property_cell);
ExitPoint direct_exit(this);
- StoreGlobalIC_PropertyCellCase(property_cell, p->value, &direct_exit,
+ StoreGlobalIC_PropertyCellCase(property_cell, p->value(), &direct_exit,
miss);
}
BIND(&store_transition);
@@ -1035,7 +1024,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
TNode<Map> map = CAST(map_or_property_cell);
HandleStoreICTransitionMapHandlerCase(p, map, miss,
kCheckPrototypeValidity);
- Return(p->value);
+ Return(p->value());
}
}
}
@@ -1064,10 +1053,10 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
IntPtrConstant(DescriptorArray::ToKeyIndex(-1)), IntPtrMul(nof, factor)));
if (flags & kValidateTransitionHandler) {
TNode<Name> key = LoadKeyByKeyIndex(descriptors, last_key_index);
- GotoIf(WordNotEqual(key, p->name), miss);
+ GotoIf(WordNotEqual(key, p->name()), miss);
} else {
CSA_ASSERT(this, WordEqual(LoadKeyByKeyIndex(descriptors, last_key_index),
- p->name));
+ p->name()));
}
Node* details = LoadDetailsByKeyIndex(descriptors, last_key_index);
if (flags & kValidateTransitionHandler) {
@@ -1088,14 +1077,14 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
// DontEnum attribute is allowed only for private symbols and vice versa.
Branch(Word32Equal(
IsSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
- IsPrivateSymbol(p->name)),
+ IsPrivateSymbol(p->name())),
&attributes_ok, miss);
BIND(&attributes_ok);
}
- OverwriteExistingFastDataProperty(p->receiver, transition_map, descriptors,
- last_key_index, details, p->value, miss,
+ OverwriteExistingFastDataProperty(p->receiver(), transition_map, descriptors,
+ last_key_index, details, p->value(), miss,
true);
}
@@ -1207,7 +1196,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
&double_rep, &tagged_rep);
BIND(&double_rep);
{
- TNode<Float64T> double_value = ChangeNumberToFloat64(value);
+ TNode<Float64T> double_value = ChangeNumberToFloat64(CAST(value));
if (FLAG_unbox_double_fields) {
if (do_transitioning_store) {
StoreMap(object, object_map);
@@ -1275,7 +1264,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
Int32Constant(Representation::kDouble)),
&cont);
{
- Node* double_value = ChangeNumberToFloat64(value);
+ Node* double_value = ChangeNumberToFloat64(CAST(value));
Node* mutable_heap_number =
AllocateMutableHeapNumberWithValue(double_value);
var_value.Bind(mutable_heap_number);
@@ -1301,7 +1290,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
{
Node* mutable_heap_number =
LoadPropertyArrayElement(properties, backing_store_index);
- TNode<Float64T> double_value = ChangeNumberToFloat64(value);
+ TNode<Float64T> double_value = ChangeNumberToFloat64(CAST(value));
Label if_mutable(this);
GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
@@ -1370,7 +1359,7 @@ void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(setter)));
Callable callable = CodeFactory::Call(isolate());
- Return(CallJS(callable, p->context, setter, p->receiver, p->value));
+ Return(CallJS(callable, p->context(), setter, p->receiver(), p->value()));
}
void AccessorAssembler::HandleStoreICProtoHandler(
@@ -1388,8 +1377,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
&if_transitioning_element_store);
BIND(&if_element_store);
{
- TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context,
- p->receiver, p->name, p->value, p->slot, p->vector);
+ TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context(),
+ p->receiver(), p->name(), p->value(), p->slot(),
+ p->vector());
}
BIND(&if_transitioning_element_store);
@@ -1401,9 +1391,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(IsDeprecatedMap(transition_map), miss);
- TailCallStub(StoreTransitionDescriptor{}, code_handler, p->context,
- p->receiver, p->name, transition_map, p->value, p->slot,
- p->vector);
+ TailCallStub(StoreTransitionDescriptor{}, code_handler, p->context(),
+ p->receiver(), p->name(), transition_map, p->value(),
+ p->slot(), p->vector());
}
};
}
@@ -1422,8 +1412,8 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
StoreValueByKeyIndex<NameDictionary>(
- CAST(properties), UncheckedCast<IntPtrT>(name_index), p->value);
- Return(p->value);
+ CAST(properties), UncheckedCast<IntPtrT>(name_index), p->value());
+ Return(p->value());
},
miss, ic_mode);
@@ -1470,16 +1460,17 @@ void AccessorAssembler::HandleStoreICProtoHandler(
// case is covered above by LookupOnReceiver bit handling of the smi
// handler.
Label slow(this);
- TNode<Map> receiver_map = LoadMap(p->receiver);
+ TNode<Map> receiver_map = LoadMap(p->receiver());
InvalidateValidityCellIfPrototype(receiver_map);
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(p->receiver));
- Add<NameDictionary>(properties, CAST(p->name), p->value, &slow);
- Return(p->value);
+ TNode<NameDictionary> properties =
+ CAST(LoadSlowProperties(p->receiver()));
+ Add<NameDictionary>(properties, CAST(p->name()), p->value(), &slow);
+ Return(p->value());
BIND(&slow);
- TailCallRuntime(Runtime::kAddDictionaryProperty, p->context, p->receiver,
- p->name, p->value);
+ TailCallRuntime(Runtime::kAddDictionaryProperty, p->context(),
+ p->receiver(), p->name(), p->value());
}
BIND(&if_accessor);
@@ -1513,7 +1504,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Node* data =
LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
- VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver);
+ VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver());
Label store(this);
GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kApiSetter)),
&store);
@@ -1523,20 +1514,20 @@ void AccessorAssembler::HandleStoreICProtoHandler(
WordEqual(handler_kind,
IntPtrConstant(StoreHandler::kApiSetterHolderIsPrototype)));
- api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver)));
+ api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver())));
Goto(&store);
BIND(&store);
Callable callable = CodeFactory::CallApiCallback(isolate());
TNode<IntPtrT> argc = IntPtrConstant(1);
Return(CallStub(callable, context, callback, argc, data,
- api_holder.value(), p->receiver, p->value));
+ api_holder.value(), p->receiver(), p->value()));
}
BIND(&if_store_global_proxy);
{
ExitPoint direct_exit(this);
- StoreGlobalIC_PropertyCellCase(holder, p->value, &direct_exit, miss);
+ StoreGlobalIC_PropertyCellCase(holder, p->value(), &direct_exit, miss);
}
}
}
@@ -1551,13 +1542,13 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
to_name_failed(this, Label::kDeferred);
if (support_elements == kSupportElements) {
- TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+ TryToName(p->name(), &if_index, &var_index, &if_unique_name, &var_unique,
&to_name_failed);
BIND(&if_unique_name);
- CallBuiltin(Builtins::kProxySetProperty, p->context, proxy,
- var_unique.value(), p->value, p->receiver);
- Return(p->value);
+ CallBuiltin(Builtins::kProxySetProperty, p->context(), proxy,
+ var_unique.value(), p->value(), p->receiver());
+ Return(p->value());
// The index case is handled earlier by the runtime.
BIND(&if_index);
@@ -1566,12 +1557,12 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
Goto(&to_name_failed);
BIND(&to_name_failed);
- TailCallRuntime(Runtime::kSetPropertyWithReceiver, p->context, proxy,
- p->name, p->value, p->receiver);
+ TailCallRuntime(Runtime::kSetPropertyWithReceiver, p->context(), proxy,
+ p->name(), p->value(), p->receiver());
} else {
- Node* name = CallBuiltin(Builtins::kToName, p->context, p->name);
- TailCallBuiltin(Builtins::kProxySetProperty, p->context, proxy, name,
- p->value, p->receiver);
+ Node* name = CallBuiltin(Builtins::kToName, p->context(), p->name());
+ TailCallBuiltin(Builtins::kProxySetProperty, p->context(), proxy, name,
+ p->value(), p->receiver());
}
}
@@ -1979,7 +1970,7 @@ void AccessorAssembler::EmitElementLoad(
{
Comment("typed elements");
// Check if buffer has been detached.
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(CAST(object));
GotoIf(IsDetachedBuffer(buffer), miss);
// Bounds check.
@@ -1988,7 +1979,8 @@ void AccessorAssembler::EmitElementLoad(
if (access_mode == LoadAccessMode::kHas) {
exit_point->Return(TrueConstant());
} else {
- Node* backing_store = LoadJSTypedArrayBackingStore(CAST(object));
+ TNode<RawPtrT> backing_store =
+ LoadJSTypedArrayBackingStore(CAST(object));
Label uint8_elements(this), int8_elements(this), uint16_elements(this),
int16_elements(this), uint32_elements(this), int32_elements(this),
@@ -2102,13 +2094,13 @@ void AccessorAssembler::NameDictionaryNegativeLookup(Node* object,
}
void AccessorAssembler::InvalidateValidityCellIfPrototype(Node* map,
- Node* bitfield2) {
+ Node* bitfield3) {
Label is_prototype(this), cont(this);
- if (bitfield2 == nullptr) {
- bitfield2 = LoadMapBitField2(map);
+ if (bitfield3 == nullptr) {
+ bitfield3 = LoadMapBitField3(map);
}
- Branch(IsSetWord32(bitfield2, Map::IsPrototypeMapBit::kMask), &is_prototype,
+ Branch(IsSetWord32(bitfield3, Map::IsPrototypeMapBit::kMask), &is_prototype,
&cont);
BIND(&is_prototype);
@@ -2217,7 +2209,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
TVARIABLE(IntPtrT, var_name_index);
Label* notfound = use_stub_cache == kUseStubCache ? &try_stub_cache
: &lookup_prototype_chain;
- DescriptorLookup(p->name, descriptors, bitfield3, &if_descriptor_found,
+ DescriptorLookup(p->name(), descriptors, bitfield3, &if_descriptor_found,
&var_name_index, notfound);
BIND(&if_descriptor_found);
@@ -2232,20 +2224,21 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
Label stub_cache(this);
BIND(&try_stub_cache);
// When there is no feedback vector, don't use the stub cache.
- GotoIfNot(IsUndefined(p->vector), &stub_cache);
+ GotoIfNot(IsUndefined(p->vector()), &stub_cache);
// Fall back to the slow path for private symbols.
- Branch(IsPrivateSymbol(p->name), slow, &lookup_prototype_chain);
+ Branch(IsPrivateSymbol(p->name()), slow, &lookup_prototype_chain);
BIND(&stub_cache);
Comment("stub cache probe for fast property load");
TVARIABLE(MaybeObject, var_handler);
Label found_handler(this, &var_handler), stub_cache_miss(this);
- TryProbeStubCache(isolate()->load_stub_cache(), receiver, p->name,
+ TryProbeStubCache(isolate()->load_stub_cache(), receiver, p->name(),
&found_handler, &var_handler, &stub_cache_miss);
BIND(&found_handler);
{
- HandleLoadICHandlerCase(p, CAST(var_handler.value()), &stub_cache_miss,
- &direct_exit);
+ LazyLoadICParameters lazy_p(p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()),
+ &stub_cache_miss, &direct_exit);
}
BIND(&stub_cache_miss);
@@ -2253,8 +2246,8 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
// TODO(jkummerow): Check if the property exists on the prototype
// chain. If it doesn't, then there's no point in missing.
Comment("KeyedLoadGeneric_miss");
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
- p->name, p->slot, p->vector);
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context(), p->receiver(),
+ p->name(), p->slot(), p->vector());
}
}
@@ -2267,7 +2260,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index);
TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name),
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()),
&dictionary_found, &var_name_index,
&lookup_prototype_chain);
BIND(&dictionary_found);
@@ -2281,7 +2274,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
BIND(&if_found_on_receiver);
{
Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, receiver, slow);
+ p->context(), receiver, slow);
IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
Return(value);
}
@@ -2290,14 +2283,14 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
{
VARIABLE(var_holder_map, MachineRepresentation::kTagged);
VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32);
- Label return_undefined(this);
+ Label return_undefined(this), is_private_symbol(this);
Variable* merged_variables[] = {&var_holder_map, &var_holder_instance_type};
Label loop(this, arraysize(merged_variables), merged_variables);
var_holder_map.Bind(receiver_map);
var_holder_instance_type.Bind(instance_type);
- // Private symbols must not be looked up on the prototype chain.
- GotoIf(IsPrivateSymbol(p->name), &return_undefined);
+ GotoIf(IsPrivateSymbol(p->name()), &is_private_symbol);
+
Goto(&loop);
BIND(&loop);
{
@@ -2312,9 +2305,9 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
var_holder_map.Bind(proto_map);
var_holder_instance_type.Bind(proto_instance_type);
Label next_proto(this), return_value(this, &var_value), goto_slow(this);
- TryGetOwnProperty(p->context, receiver, proto, proto_map,
- proto_instance_type, p->name, &return_value, &var_value,
- &next_proto, &goto_slow);
+ TryGetOwnProperty(p->context(), receiver, proto, proto_map,
+ proto_instance_type, p->name(), &return_value,
+ &var_value, &next_proto, &goto_slow);
// This trampoline and the next are required to appease Turbofan's
// variable merging.
@@ -2328,6 +2321,16 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
Return(var_value.value());
}
+ BIND(&is_private_symbol);
+ {
+ CSA_ASSERT(this, IsPrivateSymbol(p->name()));
+
+ // For private names that don't exist on the receiver, we bail
+ // to the runtime to throw. For private symbols, we just return
+ // undefined.
+ Branch(IsPrivateName(p->name()), slow, &return_undefined);
+ }
+
BIND(&return_undefined);
Return(UndefinedConstant());
}
@@ -2338,11 +2341,11 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
GotoIfNot(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), slow);
// Private field/symbol lookup is not supported.
- GotoIf(IsPrivateSymbol(p->name), slow);
+ GotoIf(IsPrivateSymbol(p->name()), slow);
direct_exit.ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
- p->context, receiver /*holder is the same as receiver*/, p->name,
+ p->context(), receiver /*holder is the same as receiver*/, p->name(),
receiver, SmiConstant(OnNonExistent::kReturnUndefined));
}
}
@@ -2400,26 +2403,24 @@ void AccessorAssembler::TryProbeStubCacheTable(
const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
- // Check that the key in the entry matches the name.
Node* key_base = ExternalConstant(
ExternalReference::Create(stub_cache->key_reference(table)));
- Node* entry_key = Load(MachineType::Pointer(), key_base, entry_offset);
- GotoIf(WordNotEqual(name, entry_key), if_miss);
-
- // Get the map entry from the cache.
- DCHECK_EQ(kSystemPointerSize * 2,
- stub_cache->map_reference(table).address() -
- stub_cache->key_reference(table).address());
- Node* entry_map =
- Load(MachineType::Pointer(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kSystemPointerSize * 2)));
- GotoIf(WordNotEqual(map, entry_map), if_miss);
-
- DCHECK_EQ(kSystemPointerSize, stub_cache->value_reference(table).address() -
- stub_cache->key_reference(table).address());
+
+ // Check that the key in the entry matches the name.
+ DCHECK_EQ(0, offsetof(StubCache::Entry, key));
+ Node* cached_key = Load(MachineType::TaggedPointer(), key_base, entry_offset);
+ GotoIf(WordNotEqual(name, cached_key), if_miss);
+
+ // Check that the map in the entry matches.
+ Node* cached_map = Load(
+ MachineType::TaggedPointer(), key_base,
+ IntPtrAdd(entry_offset, IntPtrConstant(offsetof(StubCache::Entry, map))));
+ GotoIf(WordNotEqual(map, cached_map), if_miss);
+
TNode<MaybeObject> handler = ReinterpretCast<MaybeObject>(
Load(MachineType::AnyTagged(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kSystemPointerSize))));
+ IntPtrAdd(entry_offset,
+ IntPtrConstant(offsetof(StubCache::Entry, value)))));
// We found the handler.
*var_handler = handler;
@@ -2462,7 +2463,7 @@ void AccessorAssembler::TryProbeStubCache(StubCache* stub_cache, Node* receiver,
//////////////////// Entry points into private implementation (one per stub).
-void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
+void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
ExitPoint* exit_point) {
// Must be kept in sync with LoadIC.
@@ -2476,10 +2477,10 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
Label stub_call(this, Label::kDeferred), miss(this, Label::kDeferred),
no_feedback(this, Label::kDeferred);
- Node* recv_map = LoadReceiverMap(p->receiver);
+ Node* recv_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(recv_map), &miss);
- GotoIf(IsUndefined(p->vector), &no_feedback);
+ GotoIf(IsUndefined(p->vector()), &no_feedback);
// Inlined fast path.
{
@@ -2489,7 +2490,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
Label try_polymorphic(this), if_handler(this, &var_handler);
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, recv_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), recv_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
@@ -2513,8 +2514,9 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
Callable ic =
Builtins::CallableFor(isolate(), Builtins::kLoadIC_Noninlined);
Node* code_target = HeapConstant(ic.code());
- exit_point->ReturnCallStub(ic.descriptor(), code_target, p->context,
- p->receiver, p->name, p->slot, p->vector);
+ exit_point->ReturnCallStub(ic.descriptor(), code_target, p->context(),
+ p->receiver(), p->name(), p->slot(),
+ p->vector());
}
BIND(&no_feedback);
@@ -2523,15 +2525,16 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
// Call into the stub that implements the non-inlined parts of LoadIC.
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kLoadIC_Uninitialized),
- p->context, p->receiver, p->name, p->slot, p->vector);
+ p->context(), p->receiver(), p->name(), p->slot(), p->vector());
}
BIND(&miss);
{
Comment("LoadIC_BytecodeHandler_miss");
- exit_point->ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context,
- p->receiver, p->name, p->slot, p->vector);
+ exit_point->ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context(),
+ p->receiver(), p->name(), p->slot(),
+ p->vector());
}
}
@@ -2544,15 +2547,19 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
Label if_handler(this, &var_handler), non_inlined(this, Label::kDeferred),
try_polymorphic(this), miss(this, Label::kDeferred);
- Node* receiver_map = LoadReceiverMap(p->receiver);
+ Node* receiver_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(receiver_map), &miss);
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
- HandleLoadICHandlerCase(p, CAST(var_handler.value()), &miss, &direct_exit);
+ {
+ LazyLoadICParameters lazy_p(p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()), &miss,
+ &direct_exit);
+ }
BIND(&try_polymorphic);
TNode<HeapObject> strong_feedback = GetHeapObjectIfStrong(feedback, &miss);
@@ -2571,8 +2578,9 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
}
BIND(&miss);
- direct_exit.ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver,
- p->name, p->slot, p->vector);
+ direct_exit.ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context(),
+ p->receiver(), p->name(), p->slot(),
+ p->vector());
}
void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
@@ -2595,7 +2603,7 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
GotoIfNot(WordEqual(feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
&try_uninitialized);
- TryProbeStubCache(isolate()->load_stub_cache(), p->receiver, p->name,
+ TryProbeStubCache(isolate()->load_stub_cache(), p->receiver(), p->name(),
if_handler, var_handler, miss);
}
@@ -2606,24 +2614,24 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
miss);
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kLoadIC_Uninitialized),
- p->context, p->receiver, p->name, p->slot, p->vector);
+ p->context(), p->receiver(), p->name(), p->slot(), p->vector());
}
}
void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
Label miss(this, Label::kDeferred),
check_function_prototype(this);
- Node* receiver = p->receiver;
+ Node* receiver = p->receiver();
GotoIf(TaggedIsSmi(receiver), &miss);
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
- GotoIf(IsUndefined(p->vector), &check_function_prototype);
+ GotoIf(IsUndefined(p->vector()), &check_function_prototype);
// Optimistically write the state transition to the vector.
- StoreFeedbackVectorSlot(p->vector, p->slot,
+ StoreFeedbackVectorSlot(p->vector(), p->slot(),
LoadRoot(RootIndex::kpremonomorphic_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
- StoreWeakReferenceInFeedbackVector(p->vector, p->slot, receiver_map,
+ StoreWeakReferenceInFeedbackVector(p->vector(), p->slot(), receiver_map,
kTaggedSize, SMI_PARAMETERS);
Goto(&check_function_prototype);
@@ -2634,7 +2642,7 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
Label not_function_prototype(this, Label::kDeferred);
GotoIfNot(InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE),
&not_function_prototype);
- GotoIfNot(IsPrototypeString(p->name), &not_function_prototype);
+ GotoIfNot(IsPrototypeString(p->name()), &not_function_prototype);
GotoIfPrototypeRequiresRuntimeLookup(CAST(receiver), CAST(receiver_map),
&not_function_prototype);
@@ -2648,16 +2656,16 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
BIND(&miss);
{
Label call_runtime(this, Label::kDeferred);
- GotoIf(IsUndefined(p->vector), &call_runtime);
+ GotoIf(IsUndefined(p->vector()), &call_runtime);
// Undo the optimistic state transition.
- StoreFeedbackVectorSlot(p->vector, p->slot,
+ StoreFeedbackVectorSlot(p->vector(), p->slot(),
LoadRoot(RootIndex::kuninitialized_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
Goto(&call_runtime);
BIND(&call_runtime);
- TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
- p->slot, p->vector);
+ TailCallRuntime(Runtime::kLoadIC_Miss, p->context(), p->receiver(),
+ p->name(), p->slot(), p->vector());
}
}
@@ -2750,8 +2758,8 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
CAST(LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX));
Node* holder = LoadContextElement(native_context, Context::EXTENSION_INDEX);
- LoadICParameters p(context, receiver, lazy_name(),
- ParameterToTagged(slot, slot_mode), vector, holder);
+ LazyLoadICParameters p([=] { return context; }, receiver, lazy_name,
+ ParameterToTagged(slot, slot_mode), vector, holder);
HandleLoadICHandlerCase(&p, handler, miss, exit_point, ICMode::kGlobalIC,
on_nonexistent);
@@ -2767,20 +2775,22 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
try_polymorphic_name(this, Label::kDeferred),
miss(this, Label::kDeferred), generic(this, Label::kDeferred);
- Node* receiver_map = LoadReceiverMap(p->receiver);
+ Node* receiver_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(receiver_map), &miss);
- GotoIf(IsUndefined(p->vector), &generic);
+ GotoIf(IsUndefined(p->vector()), &generic);
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
{
- HandleLoadICHandlerCase(
- p, CAST(var_handler.value()), &miss, &direct_exit, ICMode::kNonGlobalIC,
- OnNonExistent::kReturnUndefined, kSupportElements, access_mode);
+ LazyLoadICParameters lazy_p(p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()), &miss,
+ &direct_exit, ICMode::kNonGlobalIC,
+ OnNonExistent::kReturnUndefined, kSupportElements,
+ access_mode);
}
BIND(&try_polymorphic);
@@ -2807,14 +2817,15 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
TailCallBuiltin(access_mode == LoadAccessMode::kLoad
? Builtins::kKeyedLoadIC_Megamorphic
: Builtins::kKeyedHasIC_Megamorphic,
- p->context, p->receiver, p->name, p->slot, p->vector);
+ p->context(), p->receiver(), p->name(), p->slot(),
+ p->vector());
}
BIND(&try_polymorphic_name);
{
// We might have a name in feedback, and a weak fixed array in the next
// slot.
- Node* name = p->name;
+ Node* name = p->name();
Comment("KeyedLoadIC_try_polymorphic_name");
VARIABLE(var_name, MachineRepresentation::kTagged, name);
VARIABLE(var_index, MachineType::PointerRepresentation());
@@ -2857,36 +2868,37 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
TailCallBuiltin(access_mode == LoadAccessMode::kLoad
? Builtins::kKeyedLoadIC_PolymorphicName
: Builtins::kKeyedHasIC_PolymorphicName,
- p->context, p->receiver, name, p->slot, p->vector);
+ p->context(), p->receiver(), name, p->slot(),
+ p->vector());
}
}
BIND(&miss);
{
Comment("KeyedLoadIC_miss");
- TailCallRuntime(access_mode == LoadAccessMode::kLoad
- ? Runtime::kKeyedLoadIC_Miss
- : Runtime::kKeyedHasIC_Miss,
- p->context, p->receiver, p->name, p->slot, p->vector);
+ TailCallRuntime(
+ access_mode == LoadAccessMode::kLoad ? Runtime::kKeyedLoadIC_Miss
+ : Runtime::kKeyedHasIC_Miss,
+ p->context(), p->receiver(), p->name(), p->slot(), p->vector());
}
}
void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
VARIABLE(var_index, MachineType::PointerRepresentation());
- VARIABLE(var_unique, MachineRepresentation::kTagged, p->name);
+ VARIABLE(var_unique, MachineRepresentation::kTagged, p->name());
Label if_index(this), if_unique_name(this), if_notunique(this),
if_other(this, Label::kDeferred), if_runtime(this, Label::kDeferred);
- Node* receiver = p->receiver;
+ Node* receiver = p->receiver();
GotoIf(TaggedIsSmi(receiver), &if_runtime);
GotoIf(IsNullOrUndefined(receiver), &if_runtime);
- TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+ TryToName(p->name(), &if_index, &var_index, &if_unique_name, &var_unique,
&if_other, &if_notunique);
BIND(&if_other);
{
- Node* name = CallBuiltin(Builtins::kToName, p->context, p->name);
+ Node* name = CallBuiltin(Builtins::kToName, p->context(), p->name());
var_unique.Bind(name);
TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique,
&if_runtime, &if_notunique);
@@ -2902,8 +2914,7 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
BIND(&if_unique_name);
{
- LoadICParameters pp = *p;
- pp.name = var_unique.value();
+ LoadICParameters pp(p, var_unique.value());
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
@@ -2929,8 +2940,7 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
// successfully internalizing the incoming string. Past experiments
// with this have shown that it causes too much traffic on the stub
// cache. We may want to re-evaluate that in the future.
- LoadICParameters pp = *p;
- pp.name = var_unique.value();
+ LoadICParameters pp(p, var_unique.value());
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
@@ -2946,7 +2956,7 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
Comment("KeyedLoadGeneric_slow");
IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
// TODO(jkummerow): Should we use the GetProperty TF stub instead?
- TailCallRuntime(Runtime::kGetProperty, p->context, p->receiver,
+ TailCallRuntime(Runtime::kGetProperty, p->context(), p->receiver(),
var_unique.value());
}
}
@@ -2956,12 +2966,12 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
- Node* receiver = p->receiver;
+ Node* receiver = p->receiver();
Node* receiver_map = LoadReceiverMap(receiver);
- Node* name = p->name;
- Node* vector = p->vector;
- Node* slot = p->slot;
- Node* context = p->context;
+ Node* name = p->name();
+ Node* vector = p->vector();
+ Node* slot = p->slot();
+ TNode<Context> context = p->context();
// When we get here, we know that the {name} matches the recorded
// feedback name in the {vector} and can safely be used for the
@@ -2980,9 +2990,11 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
BIND(&if_handler);
{
ExitPoint direct_exit(this);
- HandleLoadICHandlerCase(
- p, CAST(var_handler.value()), &miss, &direct_exit, ICMode::kNonGlobalIC,
- OnNonExistent::kReturnUndefined, kOnlyProperties, access_mode);
+ LazyLoadICParameters lazy_p(p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()), &miss,
+ &direct_exit, ICMode::kNonGlobalIC,
+ OnNonExistent::kReturnUndefined, kOnlyProperties,
+ access_mode);
}
BIND(&miss);
@@ -3006,14 +3018,14 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
try_uninitialized(this, Label::kDeferred), miss(this, Label::kDeferred),
no_feedback(this, Label::kDeferred);
- Node* receiver_map = LoadReceiverMap(p->receiver);
+ Node* receiver_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(receiver_map), &miss);
- GotoIf(IsUndefined(p->vector), &no_feedback);
+ GotoIf(IsUndefined(p->vector()), &no_feedback);
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
{
@@ -3039,7 +3051,7 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
&try_uninitialized);
- TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
+ TryProbeStubCache(isolate()->store_stub_cache(), p->receiver(), p->name(),
&if_handler, &var_handler, &miss);
}
BIND(&try_uninitialized);
@@ -3052,21 +3064,22 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
BIND(&no_feedback);
{
- TailCallBuiltin(Builtins::kStoreIC_Uninitialized, p->context, p->receiver,
- p->name, p->value, p->slot, p->vector);
+ TailCallBuiltin(Builtins::kStoreIC_Uninitialized, p->context(),
+ p->receiver(), p->name(), p->value(), p->slot(),
+ p->vector());
}
BIND(&miss);
{
- TailCallRuntime(Runtime::kStoreIC_Miss, p->context, p->value, p->slot,
- p->vector, p->receiver, p->name);
+ TailCallRuntime(Runtime::kStoreIC_Miss, p->context(), p->value(), p->slot(),
+ p->vector(), p->receiver(), p->name());
}
}
void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
Label if_lexical_var(this), if_heapobject(this);
TNode<MaybeObject> maybe_weak_ref =
- LoadFeedbackVectorSlot(pp->vector, pp->slot, 0, SMI_PARAMETERS);
+ LoadFeedbackVectorSlot(pp->vector(), pp->slot(), 0, SMI_PARAMETERS);
Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_heapobject);
BIND(&if_heapobject);
@@ -3081,31 +3094,32 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, &try_handler));
ExitPoint direct_exit(this);
- StoreGlobalIC_PropertyCellCase(property_cell, pp->value, &direct_exit,
+ StoreGlobalIC_PropertyCellCase(property_cell, pp->value(), &direct_exit,
&miss);
BIND(&try_handler);
{
Comment("StoreGlobalIC_try_handler");
TNode<MaybeObject> handler = LoadFeedbackVectorSlot(
- pp->vector, pp->slot, kTaggedSize, SMI_PARAMETERS);
+ pp->vector(), pp->slot(), kTaggedSize, SMI_PARAMETERS);
GotoIf(WordEqual(handler, LoadRoot(RootIndex::kuninitialized_symbol)),
&miss);
- StoreICParameters p = *pp;
- DCHECK_NULL(p.receiver);
- Node* native_context = LoadNativeContext(p.context);
- p.receiver =
- LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX);
+ DCHECK_NULL(pp->receiver());
+ Node* native_context = LoadNativeContext(pp->context());
+ StoreICParameters p(
+ pp->context(),
+ LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX),
+ pp->name(), pp->value(), pp->slot(), pp->vector());
HandleStoreICHandlerCase(&p, handler, &miss, ICMode::kGlobalIC);
}
BIND(&miss);
{
- TailCallRuntime(Runtime::kStoreGlobalIC_Miss, pp->context, pp->value,
- pp->slot, pp->vector, pp->name);
+ TailCallRuntime(Runtime::kStoreGlobalIC_Miss, pp->context(), pp->value(),
+ pp->slot(), pp->vector(), pp->name());
}
}
@@ -3118,9 +3132,9 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
TNode<IntPtrT> slot_index =
Signed(DecodeWord<FeedbackNexus::SlotIndexBits>(lexical_handler));
TNode<Context> script_context =
- LoadScriptContext(CAST(pp->context), context_index);
- StoreContextElement(script_context, slot_index, pp->value);
- Return(pp->value);
+ LoadScriptContext(pp->context(), context_index);
+ StoreContextElement(script_context, slot_index, pp->value());
+ Return(pp->value());
}
}
@@ -3199,14 +3213,14 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
no_feedback(this, Label::kDeferred),
try_polymorphic_name(this, Label::kDeferred);
- Node* receiver_map = LoadReceiverMap(p->receiver);
+ Node* receiver_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(receiver_map), &miss);
- GotoIf(IsUndefined(p->vector), &no_feedback);
+ GotoIf(IsUndefined(p->vector()), &no_feedback);
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
{
@@ -3237,19 +3251,19 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
BIND(&no_feedback);
{
- TailCallBuiltin(Builtins::kKeyedStoreIC_Megamorphic, p->context,
- p->receiver, p->name, p->value, p->slot);
+ TailCallBuiltin(Builtins::kKeyedStoreIC_Megamorphic, p->context(),
+ p->receiver(), p->name(), p->value(), p->slot());
}
BIND(&try_polymorphic_name);
{
// We might have a name in feedback, and a fixed array in the next slot.
Comment("KeyedStoreIC_try_polymorphic_name");
- GotoIfNot(WordEqual(strong_feedback, p->name), &miss);
+ GotoIfNot(WordEqual(strong_feedback, p->name()), &miss);
// If the name comparison succeeded, we know we have a feedback vector
// with at least one map/handler pair.
TNode<MaybeObject> feedback_element = LoadFeedbackVectorSlot(
- p->vector, p->slot, kTaggedSize, SMI_PARAMETERS);
+ p->vector(), p->slot(), kTaggedSize, SMI_PARAMETERS);
TNode<WeakFixedArray> array = CAST(feedback_element);
HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
&miss);
@@ -3258,8 +3272,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
BIND(&miss);
{
Comment("KeyedStoreIC_miss");
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
- p->vector, p->receiver, p->name);
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context(), p->value(),
+ p->slot(), p->vector(), p->receiver(), p->name());
}
}
@@ -3272,13 +3286,13 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
try_polymorphic(this, Label::kDeferred),
try_megamorphic(this, Label::kDeferred);
- Node* array_map = LoadReceiverMap(p->receiver);
+ Node* array_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(array_map), &miss);
- GotoIf(IsUndefined(p->vector), &miss);
+ GotoIf(IsUndefined(p->vector()), &miss);
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, array_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), array_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
@@ -3289,8 +3303,9 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
TNode<HeapObject> handler = CAST(var_handler.value());
Label if_transitioning_element_store(this);
GotoIfNot(IsCode(handler), &if_transitioning_element_store);
- TailCallStub(StoreWithVectorDescriptor{}, CAST(handler), CAST(p->context),
- p->receiver, p->name, p->value, p->slot, p->vector);
+ TailCallStub(StoreWithVectorDescriptor{}, CAST(handler), p->context(),
+ p->receiver(), p->name(), p->value(), p->slot(),
+ p->vector());
BIND(&if_transitioning_element_store);
{
@@ -3301,8 +3316,9 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
GotoIf(IsDeprecatedMap(transition_map), &miss);
Node* code = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
CSA_ASSERT(this, IsCode(code));
- TailCallStub(StoreTransitionDescriptor{}, code, p->context, p->receiver,
- p->name, transition_map, p->value, p->slot, p->vector);
+ TailCallStub(StoreTransitionDescriptor{}, code, p->context(),
+ p->receiver(), p->name(), transition_map, p->value(),
+ p->slot(), p->vector());
}
}
@@ -3327,16 +3343,17 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
GotoIfNot(
WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
&miss);
- TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, p->context,
- p->value, p->receiver, p->name);
+ TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, p->context(),
+ p->value(), p->receiver(), p->name());
}
}
BIND(&miss);
{
Comment("StoreInArrayLiteralIC_miss");
- TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Miss, p->context, p->value,
- p->slot, p->vector, p->receiver, p->name);
+ TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Miss, p->context(),
+ p->value(), p->slot(), p->vector(), p->receiver(),
+ p->name());
}
}
@@ -3349,7 +3366,7 @@ void AccessorAssembler::GenerateLoadIC() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
LoadIC(&p);
@@ -3362,7 +3379,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ExitPoint direct_exit(this);
TVARIABLE(MaybeObject, var_handler);
@@ -3372,7 +3389,8 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
&var_handler, &miss);
BIND(&if_handler);
- LoadICParameters p(context, receiver, name, slot, vector);
+ LazyLoadICParameters p([=] { return context; }, receiver,
+ [=] { return CAST(name); }, slot, vector);
HandleLoadICHandlerCase(&p, CAST(var_handler.value()), &miss, &direct_exit);
BIND(&miss);
@@ -3387,7 +3405,7 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ExitPoint direct_exit(this);
TVARIABLE(MaybeObject, var_handler);
@@ -3403,7 +3421,11 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
&miss, &direct_exit);
BIND(&if_handler);
- HandleLoadICHandlerCase(&p, CAST(var_handler.value()), &miss, &direct_exit);
+ {
+ LazyLoadICParameters lazy_p(&p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()), &miss,
+ &direct_exit);
+ }
BIND(&miss);
direct_exit.ReturnCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
@@ -3417,7 +3439,7 @@ void AccessorAssembler::GenerateLoadIC_Uninitialized() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
LoadIC_Uninitialized(&p);
@@ -3429,7 +3451,7 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
@@ -3441,7 +3463,7 @@ void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kLoadIC_Megamorphic, context, receiver, name, slot,
@@ -3454,13 +3476,13 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ExitPoint direct_exit(this);
LoadGlobalIC(
vector, slot,
// lazy_context
- [=] { return CAST(context); },
+ [=] { return context; },
// lazy_name
[=] { return CAST(name); }, typeof_mode, &direct_exit);
}
@@ -3470,7 +3492,7 @@ void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
Callable callable =
@@ -3485,7 +3507,7 @@ void AccessorAssembler::GenerateKeyedLoadIC() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadIC(&p, LoadAccessMode::kLoad);
@@ -3498,7 +3520,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICGeneric(&p);
@@ -3510,7 +3532,7 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedLoadIC, context, receiver, name, slot,
@@ -3523,7 +3545,7 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedLoadIC_Megamorphic, context, receiver, name,
@@ -3537,7 +3559,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICPolymorphicName(&p, LoadAccessMode::kLoad);
@@ -3550,7 +3572,7 @@ void AccessorAssembler::GenerateStoreGlobalIC() {
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
StoreICParameters p(context, nullptr, name, value, slot, vector);
StoreGlobalIC(&p);
@@ -3562,7 +3584,7 @@ void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
Node* name = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot, vector);
@@ -3576,7 +3598,7 @@ void AccessorAssembler::GenerateStoreIC() {
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
StoreICParameters p(context, receiver, name, value, slot, vector);
StoreIC(&p);
@@ -3589,7 +3611,7 @@ void AccessorAssembler::GenerateStoreICTrampoline() {
Node* name = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kStoreIC, context, receiver, name, value, slot,
@@ -3604,7 +3626,7 @@ void AccessorAssembler::GenerateKeyedStoreIC() {
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
StoreICParameters p(context, receiver, name, value, slot, vector);
KeyedStoreIC(&p);
@@ -3617,7 +3639,7 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
Node* name = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedStoreIC, context, receiver, name, value, slot,
@@ -3632,7 +3654,7 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
StoreICParameters p(context, array, index, value, slot, vector);
StoreInArrayLiteralIC(&p);
@@ -3640,7 +3662,7 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
void AccessorAssembler::GenerateCloneObjectIC_Slow() {
using Descriptor = CloneObjectWithVectorDescriptor;
- TNode<HeapObject> source = CAST(Parameter(Descriptor::kSource));
+ TNode<Object> source = CAST(Parameter(Descriptor::kSource));
TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3655,7 +3677,7 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
LoadObjectField(object_fn, JSFunction::kPrototypeOrInitialMapOffset));
CSA_ASSERT(this, IsMap(initial_map));
- TNode<JSObject> result = CAST(AllocateJSObjectFromMap(initial_map));
+ TNode<JSObject> result = AllocateJSObjectFromMap(initial_map);
{
Label did_set_proto_if_needed(this);
@@ -3672,28 +3694,16 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
}
ReturnIf(IsNullOrUndefined(source), result);
+ source = ToObject_Inline(context, source);
- CSA_ASSERT(this, IsJSReceiver(source));
-
- Label call_runtime(this, Label::kDeferred);
- Label done(this);
-
- TNode<Map> map = LoadMap(source);
- TNode<Int32T> type = LoadMapInstanceType(map);
- {
- Label cont(this);
- GotoIf(IsJSObjectInstanceType(type), &cont);
- GotoIf(InstanceTypeEqual(type, JS_PROXY_TYPE), &call_runtime);
- GotoIfNot(IsStringInstanceType(type), &done);
- Branch(SmiEqual(LoadStringLengthAsSmi(CAST(source)), SmiConstant(0)), &done,
- &call_runtime);
- BIND(&cont);
- }
+ Label call_runtime(this, Label::kDeferred), done(this);
+ TNode<Map> source_map = LoadMap(CAST(source));
+ GotoIfNot(IsJSObjectMap(source_map), &call_runtime);
GotoIfNot(IsEmptyFixedArray(LoadElements(CAST(source))), &call_runtime);
ForEachEnumerableOwnProperty(
- context, map, CAST(source), kPropertyAdditionOrder,
+ context, source_map, CAST(source), kPropertyAdditionOrder,
[=](TNode<Name> key, TNode<Object> value) {
SetPropertyInLiteral(context, result, key, value);
},
@@ -3710,17 +3720,17 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
void AccessorAssembler::GenerateCloneObjectIC() {
using Descriptor = CloneObjectWithVectorDescriptor;
- TNode<HeapObject> source = CAST(Parameter(Descriptor::kSource));
- Node* flags = Parameter(Descriptor::kFlags);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> source = CAST(Parameter(Descriptor::kSource));
+ TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TVARIABLE(MaybeObject, var_handler);
- Label if_handler(this, &var_handler);
- Label miss(this, Label::kDeferred), try_polymorphic(this, Label::kDeferred),
+ Label if_handler(this, &var_handler), miss(this, Label::kDeferred),
+ try_polymorphic(this, Label::kDeferred),
try_megamorphic(this, Label::kDeferred), slow(this, Label::kDeferred);
- TNode<Map> source_map = LoadMap(UncheckedCast<HeapObject>(source));
+ TNode<Map> source_map = LoadReceiverMap(source);
GotoIf(IsDeprecatedMap(source_map), &miss);
GotoIf(IsUndefined(vector), &slow);
@@ -3735,11 +3745,12 @@ void AccessorAssembler::GenerateCloneObjectIC() {
// Handlers for the CloneObjectIC stub are weak references to the Map of
// a result object.
TNode<Map> result_map = CAST(var_handler.value());
- TVARIABLE(Object, var_properties, EmptyFixedArrayConstant());
- TVARIABLE(FixedArrayBase, var_elements, EmptyFixedArrayConstant());
+ TVARIABLE(HeapObject, var_properties, EmptyFixedArrayConstant());
+ TVARIABLE(FixedArray, var_elements, EmptyFixedArrayConstant());
Label allocate_object(this);
GotoIf(IsNullOrUndefined(source), &allocate_object);
+ CSA_SLOW_ASSERT(this, IsJSObjectMap(source_map));
CSA_SLOW_ASSERT(this, IsJSObjectMap(result_map));
// The IC fast case should only be taken if the result map has a compatible
@@ -3753,7 +3764,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
// either an Smi, or a PropertyArray.
// FIXME: Make a CSA macro for this
TNode<Object> source_properties =
- LoadObjectField(source, JSObject::kPropertiesOrHashOffset);
+ LoadObjectField(CAST(source), JSObject::kPropertiesOrHashOffset);
{
GotoIf(TaggedIsSmi(source_properties), &allocate_object);
GotoIf(IsEmptyFixedArray(source_properties), &allocate_object);
@@ -3779,8 +3790,6 @@ void AccessorAssembler::GenerateCloneObjectIC() {
ReturnIf(IsNullOrUndefined(source), object);
// Lastly, clone any in-object properties.
- // Determine the inobject property capacity of both objects, and copy the
- // smaller number into the resulting object.
TNode<IntPtrT> source_start =
LoadMapInobjectPropertiesStartInWords(source_map);
TNode<IntPtrT> source_size = LoadMapInstanceSizeInWords(source_map);
@@ -3789,35 +3798,49 @@ void AccessorAssembler::GenerateCloneObjectIC() {
TNode<IntPtrT> field_offset_difference =
TimesTaggedSize(IntPtrSub(result_start, source_start));
- // If MutableHeapNumbers may be present in-object, allocations may occur
- // within this loop, thus the write barrier is required.
- //
- // TODO(caitp): skip the write barrier until the first MutableHeapNumber
- // field is found
- const bool may_use_mutable_heap_numbers = !FLAG_unbox_double_fields;
-
+ // Just copy the fields as raw data (pretending that there are no
+ // MutableHeapNumbers). This doesn't need write barriers.
BuildFastLoop(
source_start, source_size,
[=](Node* field_index) {
TNode<IntPtrT> field_offset =
TimesTaggedSize(UncheckedCast<IntPtrT>(field_index));
-
- if (may_use_mutable_heap_numbers) {
- TNode<Object> field = LoadObjectField(source, field_offset);
- field = CloneIfMutablePrimitive(field);
- TNode<IntPtrT> result_offset =
- IntPtrAdd(field_offset, field_offset_difference);
- StoreObjectField(object, result_offset, field);
- } else {
- // Copy fields as raw data.
- TNode<IntPtrT> field =
- LoadObjectField<IntPtrT>(source, field_offset);
- TNode<IntPtrT> result_offset =
- IntPtrAdd(field_offset, field_offset_difference);
- StoreObjectFieldNoWriteBarrier(object, result_offset, field);
- }
+ TNode<TaggedT> field =
+ LoadObjectField<TaggedT>(CAST(source), field_offset);
+ TNode<IntPtrT> result_offset =
+ IntPtrAdd(field_offset, field_offset_difference);
+ StoreObjectFieldNoWriteBarrier(object, result_offset, field);
},
1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+
+ // If MutableHeapNumbers can occur, we need to go through the {object}
+ // again here and properly clone them. We use a second loop here to
+ // ensure that the GC (and heap verifier) always sees properly initialized
+ // objects, i.e. never hits undefined values in double fields.
+ if (!FLAG_unbox_double_fields) {
+ BuildFastLoop(
+ source_start, source_size,
+ [=](Node* field_index) {
+ TNode<IntPtrT> result_offset =
+ IntPtrAdd(TimesTaggedSize(UncheckedCast<IntPtrT>(field_index)),
+ field_offset_difference);
+ TNode<Object> field = LoadObjectField(object, result_offset);
+ Label if_done(this), if_mutableheapnumber(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(field), &if_done);
+ Branch(IsMutableHeapNumber(CAST(field)), &if_mutableheapnumber,
+ &if_done);
+ BIND(&if_mutableheapnumber);
+ {
+ TNode<Object> value = AllocateMutableHeapNumberWithValue(
+ LoadHeapNumberValue(UncheckedCast<HeapNumber>(field)));
+ StoreObjectField(object, result_offset, value);
+ Goto(&if_done);
+ }
+ BIND(&if_done);
+ },
+ 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ }
+
Return(object);
}
@@ -3869,7 +3892,7 @@ void AccessorAssembler::GenerateKeyedHasIC() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadIC(&p, LoadAccessMode::kHas);
@@ -3880,7 +3903,7 @@ void AccessorAssembler::GenerateKeyedHasIC_Megamorphic() {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// TODO(magardn): implement HasProperty handling in KeyedLoadICGeneric
Return(HasProperty(context, receiver, name,
HasPropertyLookupMode::kHasProperty));
@@ -3893,7 +3916,7 @@ void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICPolymorphicName(&p, LoadAccessMode::kHas);
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index b0d6291094..6127b244e3 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -68,21 +68,75 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
}
struct LoadICParameters {
- LoadICParameters(Node* context, Node* receiver, Node* name, Node* slot,
- Node* vector, Node* holder = nullptr)
- : context(context),
- receiver(receiver),
- name(name),
- slot(slot),
- vector(vector),
- holder(holder ? holder : receiver) {}
-
- Node* context;
- Node* receiver;
- Node* name;
- Node* slot;
- Node* vector;
- Node* holder;
+ LoadICParameters(TNode<Context> context, Node* receiver, Node* name,
+ Node* slot, Node* vector, Node* holder = nullptr)
+ : context_(context),
+ receiver_(receiver),
+ name_(name),
+ slot_(slot),
+ vector_(vector),
+ holder_(holder ? holder : receiver) {}
+
+ LoadICParameters(const LoadICParameters* p, Node* unique_name)
+ : context_(p->context_),
+ receiver_(p->receiver_),
+ name_(unique_name),
+ slot_(p->slot_),
+ vector_(p->vector_),
+ holder_(p->holder_) {}
+
+ TNode<Context> context() const { return context_; }
+ Node* receiver() const { return receiver_; }
+ Node* name() const { return name_; }
+ Node* slot() const { return slot_; }
+ Node* vector() const { return vector_; }
+ Node* holder() const { return holder_; }
+
+ private:
+ TNode<Context> context_;
+ Node* receiver_;
+ Node* name_;
+ Node* slot_;
+ Node* vector_;
+ Node* holder_;
+ };
+
+ struct LazyLoadICParameters {
+ LazyLoadICParameters(LazyNode<Context> context, Node* receiver,
+ LazyNode<Object> name, Node* slot, Node* vector,
+ Node* holder = nullptr)
+ : context_(context),
+ receiver_(receiver),
+ name_(name),
+ slot_(slot),
+ vector_(vector),
+ holder_(holder ? holder : receiver) {}
+
+ explicit LazyLoadICParameters(const LoadICParameters* p)
+ : receiver_(p->receiver()),
+ slot_(p->slot()),
+ vector_(p->vector()),
+ holder_(p->holder()) {
+ TNode<Context> p_context = p->context();
+ context_ = [=] { return p_context; };
+ TNode<Object> p_name = TNode<Object>::UncheckedCast(p->name());
+ name_ = [=] { return p_name; };
+ }
+
+ TNode<Context> context() const { return context_(); }
+ Node* receiver() const { return receiver_; }
+ Node* name() const { return name_(); }
+ Node* slot() const { return slot_; }
+ Node* vector() const { return vector_; }
+ Node* holder() const { return holder_; }
+
+ private:
+ LazyNode<Context> context_;
+ Node* receiver_;
+ LazyNode<Object> name_;
+ Node* slot_;
+ Node* vector_;
+ Node* holder_;
};
void LoadGlobalIC(Node* vector, Node* slot,
@@ -93,7 +147,8 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// Specialized LoadIC for inlined bytecode handler, hand-tuned to omit frame
// construction on common paths.
- void LoadIC_BytecodeHandler(const LoadICParameters* p, ExitPoint* exit_point);
+ void LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
+ ExitPoint* exit_point);
// Loads dataX field from the DataHandler object.
TNode<MaybeObject> LoadHandlerDataField(SloppyTNode<DataHandler> handler,
@@ -101,11 +156,15 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
protected:
struct StoreICParameters : public LoadICParameters {
- StoreICParameters(Node* context, Node* receiver, Node* name,
+ StoreICParameters(TNode<Context> context, Node* receiver, Node* name,
SloppyTNode<Object> value, Node* slot, Node* vector)
: LoadICParameters(context, receiver, name, slot, vector),
- value(value) {}
- SloppyTNode<Object> value;
+ value_(value) {}
+
+ SloppyTNode<Object> value() const { return value_; }
+
+ private:
+ SloppyTNode<Object> value_;
};
enum class LoadAccessMode { kLoad, kHas };
@@ -127,7 +186,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
- void InvalidateValidityCellIfPrototype(Node* map, Node* bitfield2 = nullptr);
+ void InvalidateValidityCellIfPrototype(Node* map, Node* bitfield3 = nullptr);
void OverwriteExistingFastDataProperty(Node* object, Node* object_map,
Node* descriptors,
@@ -182,13 +241,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// LoadIC implementation.
void HandleLoadICHandlerCase(
- const LoadICParameters* p, TNode<Object> handler, Label* miss,
+ const LazyLoadICParameters* p, TNode<Object> handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode = ICMode::kNonGlobalIC,
OnNonExistent on_nonexistent = OnNonExistent::kReturnUndefined,
ElementSupport support_elements = kOnlyProperties,
LoadAccessMode access_mode = LoadAccessMode::kLoad);
- void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
+ void HandleLoadICSmiHandlerCase(const LazyLoadICParameters* p, Node* holder,
SloppyTNode<Smi> smi_handler,
SloppyTNode<Object> handler, Label* miss,
ExitPoint* exit_point,
@@ -196,18 +255,18 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
ElementSupport support_elements,
LoadAccessMode access_mode);
- void HandleLoadICProtoHandler(const LoadICParameters* p, Node* handler,
+ void HandleLoadICProtoHandler(const LazyLoadICParameters* p, Node* handler,
Variable* var_holder, Variable* var_smi_handler,
Label* if_smi_handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode,
LoadAccessMode access_mode);
- void HandleLoadCallbackProperty(const LoadICParameters* p,
+ void HandleLoadCallbackProperty(const LazyLoadICParameters* p,
TNode<JSObject> holder,
TNode<WordT> handler_word,
ExitPoint* exit_point);
- void HandleLoadAccessor(const LoadICParameters* p,
+ void HandleLoadAccessor(const LazyLoadICParameters* p,
TNode<CallHandlerInfo> call_handler_info,
TNode<WordT> handler_word, TNode<DataHandler> handler,
TNode<IntPtrT> handler_kind, ExitPoint* exit_point);
@@ -220,13 +279,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
Node* receiver, Label* can_access, Label* miss);
void HandleLoadICSmiHandlerLoadNamedCase(
- const LoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
+ const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
TNode<WordT> handler_word, Label* rebox_double,
Variable* var_double_value, SloppyTNode<Object> handler, Label* miss,
ExitPoint* exit_point, OnNonExistent on_nonexistent,
ElementSupport support_elements);
- void HandleLoadICSmiHandlerHasNamedCase(const LoadICParameters* p,
+ void HandleLoadICSmiHandlerHasNamedCase(const LazyLoadICParameters* p,
Node* holder,
TNode<IntPtrT> handler_kind,
Label* miss, ExitPoint* exit_point);
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index a7a5b988f6..50b7cd1ebb 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -176,16 +176,28 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
Node* rhs_instance_type = LoadInstanceType(rhs);
Node* rhs_is_oddball = InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
GotoIf(rhs_is_oddball, &call_with_oddball_feedback);
- Branch(IsBigIntInstanceType(rhs_instance_type), &bigint,
- &call_with_any_feedback);
+ Goto(&call_with_any_feedback);
}
BIND(&bigint);
{
+ // Both {lhs} and {rhs} are of BigInt type.
+ Label bigint_too_big(this);
+ var_result.Bind(
+ CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs));
+ // Check for sentinel that signals BigIntTooBig exception.
+ GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
+
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
- var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
- SmiConstant(Operation::kAdd)));
Goto(&end);
+
+ BIND(&bigint_too_big);
+ {
+ // Update feedback to prevent deopt loop.
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ feedback_vector, slot_id);
+ ThrowRangeError(context, MessageTemplate::kBigIntTooBig);
+ }
}
BIND(&call_with_oddball_feedback);
@@ -363,8 +375,12 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&if_bigint);
{
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
- var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
- SmiConstant(op)));
+ if (op == Operation::kAdd) {
+ var_result.Bind(CallBuiltin(Builtins::kBigIntAdd, context, lhs, rhs));
+ } else {
+ var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
+ SmiConstant(op)));
+ }
Goto(&end);
}
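
The addition fast path above now calls a no-throw BigInt builtin and interprets a Smi result as a "too big" sentinel; feedback is widened to kAny before the RangeError is thrown, so the optimizer stops specializing the site and a deopt loop is avoided. A rough standalone sketch of that sentinel-plus-feedback shape, using plain integers and hypothetical names in place of the CSA code:

#include <stdexcept>
#include <variant>

enum class Feedback { kBigInt, kAny };

struct TooBig {};  // stands in for the Smi sentinel
using AddResult = std::variant<long long, TooBig>;

// Fast path reports overflow via a sentinel instead of throwing itself.
AddResult AddNoThrow(long long lhs, long long rhs) {
  long long sum;
  if (__builtin_add_overflow(lhs, rhs, &sum)) return TooBig{};  // GCC/Clang intrinsic
  return sum;
}

long long AddWithFeedback(long long lhs, long long rhs, Feedback* feedback) {
  AddResult r = AddNoThrow(lhs, rhs);
  if (std::holds_alternative<TooBig>(r)) {
    *feedback = Feedback::kAny;  // widen feedback *before* throwing
    throw std::range_error("BigIntTooBig");
  }
  *feedback = Feedback::kBigInt;
  return std::get<long long>(r);
}

The ordering mirrored here is the important part: updating the feedback first means a later re-optimization starts from generic feedback rather than re-entering the same throw path.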
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 439d342f1e..54795d4202 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -47,7 +47,7 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
*holder_lookup = kHolderIsReceiver;
return Handle<JSObject>::null();
}
- if (object_map->has_hidden_prototype()) {
+ if (object_map->IsJSGlobalProxyMap() && !object_map->prototype().IsNull()) {
JSObject raw_prototype = JSObject::cast(object_map->prototype());
Handle<JSObject> prototype(raw_prototype, raw_prototype.GetIsolate());
object_map = handle(prototype->map(), prototype->GetIsolate());
@@ -60,7 +60,6 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
return Handle<JSObject>::null();
}
-
bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
Handle<JSObject> holder) const {
DCHECK(is_simple_api_call());
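
The holder lookup above stops consulting a has_hidden_prototype bit; the one remaining "hidden" hop is from a global proxy to the global object behind it, taken only when the proxy actually has a prototype. A tiny illustrative sketch with stand-in types (ObjLike is not the V8 Map/JSObject API):

// Illustrative only: the receiver unwrapping now handles exactly one case,
// a global proxy fronting a real global object.
struct ObjLike {
  bool is_global_proxy = false;
  const ObjLike* prototype = nullptr;
};

const ObjLike* UnwrapReceiver(const ObjLike* receiver) {
  if (receiver->is_global_proxy && receiver->prototype != nullptr) {
    return receiver->prototype;  // step from the proxy to the global object
  }
  return receiver;
}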
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 38b15618ac..29373d85d8 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -16,23 +16,6 @@
namespace v8 {
namespace internal {
-Address IC::constant_pool() const {
- if (FLAG_enable_embedded_constant_pool) {
- return raw_constant_pool();
- } else {
- return kNullAddress;
- }
-}
-
-
-Address IC::raw_constant_pool() const {
- if (FLAG_enable_embedded_constant_pool) {
- return *constant_pool_address_;
- } else {
- return kNullAddress;
- }
-}
-
void IC::update_receiver_map(Handle<Object> receiver) {
if (receiver->IsSmi()) {
receiver_map_ = isolate_->factory()->heap_number_map();
@@ -50,13 +33,6 @@ bool IC::IsHandler(MaybeObject object) {
(heap_object.IsDataHandler() || heap_object.IsCode()));
}
-bool IC::HostIsDeoptimizedCode() const {
- Code host =
- isolate()->inner_pointer_to_code_cache()->GetCacheEntry(pc())->code;
- return (host.kind() == Code::OPTIMIZED_FUNCTION &&
- host.marked_for_deoptimization());
-}
-
bool IC::vector_needs_update() {
if (state() == NO_FEEDBACK) return false;
return (!vector_set_ &&
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 64a9f315bb..377e3df6ae 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -29,9 +29,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/struct-inl.h"
-#ifdef V8_TRACE_FEEDBACK_UPDATES
#include "src/utils/ostreams.h"
-#endif // V8_TRACE_FEEDBACK_UPDATES
#include "src/execution/runtime-profiler.h"
#include "src/objects/prototype.h"
#include "src/runtime/runtime-utils.h"
@@ -89,7 +87,6 @@ const char* GetModifier(KeyedAccessStoreMode mode) {
void IC::TraceIC(const char* type, Handle<Object> name) {
if (V8_LIKELY(!TracingFlags::is_ic_stats_enabled())) return;
- if (HostIsDeoptimizedCode()) return;
State new_state =
(state() == NO_FEEDBACK) ? NO_FEEDBACK : nexus()->ic_state();
TraceIC(type, name, state(), new_state);
@@ -126,20 +123,21 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
return;
}
+ JavaScriptFrameIterator it(isolate());
+ JavaScriptFrame* frame = it.frame();
+ JSFunction function = frame->function();
+
ICStats::instance()->Begin();
ICInfo& ic_info = ICStats::instance()->Current();
ic_info.type = keyed_prefix ? "Keyed" : "";
ic_info.type += type;
- Object maybe_function =
- Object(Memory<Address>(fp_ + JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(maybe_function.IsJSFunction());
- JSFunction function = JSFunction::cast(maybe_function);
int code_offset = 0;
if (function.IsInterpreted()) {
- code_offset = InterpretedFrame::GetBytecodeOffset(fp());
+ code_offset = InterpretedFrame::GetBytecodeOffset(frame->fp());
} else {
- code_offset = static_cast<int>(pc() - function.code().InstructionStart());
+ code_offset =
+ static_cast<int>(frame->pc() - function.code().InstructionStart());
}
JavaScriptFrame::CollectFunctionAndOffsetForICStats(
function, function.abstract_code(), code_offset);
@@ -170,56 +168,11 @@ IC::IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
target_maps_set_(false),
slow_stub_reason_(nullptr),
nexus_(vector, slot) {
- // To improve the performance of the (much used) IC code, we unfold a few
- // levels of the stack frame iteration code. This yields a ~35% speedup when
- // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
- const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
- Address* constant_pool = nullptr;
- if (FLAG_enable_embedded_constant_pool) {
- constant_pool = reinterpret_cast<Address*>(
- entry + ExitFrameConstants::kConstantPoolOffset);
- }
- Address* pc_address =
- reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
- Address fp = Memory<Address>(entry + ExitFrameConstants::kCallerFPOffset);
-#ifdef DEBUG
- StackFrameIterator it(isolate);
- for (int i = 0; i < 1; i++) it.Advance();
- StackFrame* frame = it.frame();
- DCHECK(fp == frame->fp() && pc_address == frame->pc_address());
-#endif
- // For interpreted functions, some bytecode handlers construct a
- // frame. We have to skip the constructed frame to find the interpreted
- // function's frame. Check if the there is an additional frame, and if there
- // is skip this frame. However, the pc should not be updated. The call to
- // ICs happen from bytecode handlers.
- intptr_t frame_marker =
- Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
- if (frame_marker == StackFrame::TypeToMarker(StackFrame::STUB)) {
- fp = Memory<Address>(fp + TypedFrameConstants::kCallerFPOffset);
- }
- fp_ = fp;
- if (FLAG_enable_embedded_constant_pool) {
- constant_pool_address_ = constant_pool;
- }
- pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
DCHECK_IMPLIES(!vector.is_null(), kind_ == nexus_.kind());
state_ = (vector.is_null()) ? NO_FEEDBACK : nexus_.ic_state();
old_state_ = state_;
}
-JSFunction IC::GetHostFunction() const {
- // Compute the JavaScript frame for the frame pointer of this IC
- // structure. We need this to be able to find the function
- // corresponding to the frame.
- StackFrameIterator it(isolate());
- while (it.frame()->fp() != this->fp()) it.Advance();
- JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
- // Find the function on the stack and both the active code for the
- // function and the original code.
- return frame->function();
-}
-
static void LookupForRead(LookupIterator* it, bool is_has_property) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -262,14 +215,14 @@ bool IC::ShouldRecomputeHandler(Handle<String> name) {
// monomorphic.
if (IsGlobalIC()) return true;
- maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
+ MaybeObjectHandle maybe_handler = nexus()->FindHandlerForMap(receiver_map());
// The current map wasn't handled yet. There's no reason to stay monomorphic,
// *unless* we're moving from a deprecated map to its replacement, or
// to a more general elements kind.
// TODO(verwaest): Check if the current map is actually what the old map
// would transition to.
- if (maybe_handler_.is_null()) {
+ if (maybe_handler.is_null()) {
if (!receiver_map()->IsJSObjectMap()) return false;
Map first_map = FirstTargetMap();
if (first_map.is_null()) return false;
@@ -320,27 +273,23 @@ MaybeHandle<Object> IC::ReferenceError(Handle<Name> name) {
isolate(), NewReferenceError(MessageTemplate::kNotDefined, name), Object);
}
-// static
-void IC::OnFeedbackChanged(Isolate* isolate, FeedbackNexus* nexus,
- JSFunction host_function, const char* reason) {
- FeedbackVector vector = nexus->vector();
- FeedbackSlot slot = nexus->slot();
- OnFeedbackChanged(isolate, vector, slot, host_function, reason);
+void IC::OnFeedbackChanged(const char* reason) {
+ vector_set_ = true;
+ FeedbackVector vector = nexus()->vector();
+ FeedbackSlot slot = nexus()->slot();
+ OnFeedbackChanged(isolate(), vector, slot, reason);
}
// static
void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
- FeedbackSlot slot, JSFunction host_function,
- const char* reason) {
+ FeedbackSlot slot, const char* reason) {
if (FLAG_trace_opt_verbose) {
- // TODO(leszeks): The host function is only needed for this print, we could
- // remove it as a parameter if we're of with removing this trace (or only
- // tracing the feedback vector, not the function name).
if (vector.profiler_ticks() != 0) {
- PrintF("[resetting ticks for ");
- host_function.ShortPrint();
- PrintF(" due from %d due to IC change: %s]\n", vector.profiler_ticks(),
- reason);
+ StdoutStream os;
+ os << "[resetting ticks for ";
+ vector.shared_function_info().ShortPrint(os);
+ os << " from " << vector.profiler_ticks()
+ << " due to IC change: " << reason << "]" << std::endl;
}
}
vector.set_profiler_ticks(0);
@@ -348,7 +297,6 @@ void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
#ifdef V8_TRACE_FEEDBACK_UPDATES
if (FLAG_trace_feedback_updates) {
int slot_count = vector.metadata().slot_count();
-
StdoutStream os;
if (slot.IsInvalid()) {
os << "[Feedback slots in ";
@@ -368,19 +316,20 @@ void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
#endif
isolate->runtime_profiler()->NotifyICChanged();
- // TODO(2029): When an optimized function is patched, it would
- // be nice to propagate the corresponding type information to its
- // unoptimized version for the benefit of later inlining.
}
-static bool MigrateDeprecated(Handle<Object> object) {
+namespace {
+
+bool MigrateDeprecated(Isolate* isolate, Handle<Object> object) {
if (!object->IsJSObject()) return false;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (!receiver->map().is_deprecated()) return false;
- JSObject::MigrateInstance(Handle<JSObject>::cast(object));
+ JSObject::MigrateInstance(isolate, receiver);
return true;
}
+} // namespace
+
bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
DCHECK_EQ(MEGAMORPHIC, new_state);
DCHECK_IMPLIES(!is_keyed(), key->IsName());
@@ -389,15 +338,13 @@ bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
// functions doesn't improve performance.
bool changed =
nexus()->ConfigureMegamorphic(key->IsName() ? PROPERTY : ELEMENT);
- vector_set_ = true;
- OnFeedbackChanged(isolate(), nexus(), GetHostFunction(), "Megamorphic");
+ OnFeedbackChanged("Megamorphic");
return changed;
}
void IC::ConfigureVectorState(Handle<Map> map) {
nexus()->ConfigurePremonomorphic(map);
- vector_set_ = true;
- OnFeedbackChanged(isolate(), nexus(), GetHostFunction(), "Premonomorphic");
+ OnFeedbackChanged("Premonomorphic");
}
void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
@@ -415,9 +362,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
nexus()->ConfigureMonomorphic(name, map, handler);
}
- vector_set_ = true;
- OnFeedbackChanged(isolate(), nexus(), GetHostFunction(),
- IsLoadGlobalIC() ? "LoadGlobal" : "Monomorphic");
+ OnFeedbackChanged(IsLoadGlobalIC() ? "LoadGlobal" : "Monomorphic");
}
void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
@@ -427,8 +372,7 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
if (!is_keyed()) name = Handle<Name>::null();
nexus()->ConfigurePolymorphic(name, maps, handlers);
- vector_set_ = true;
- OnFeedbackChanged(isolate(), nexus(), GetHostFunction(), "Polymorphic");
+ OnFeedbackChanged("Polymorphic");
}
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
@@ -454,7 +398,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
object, name);
}
- if (MigrateDeprecated(object)) use_ic = false;
+ if (MigrateDeprecated(isolate(), object)) use_ic = false;
if (state() != UNINITIALIZED) {
JSObject::MakePrototypesFast(object, kStartAtReceiver, isolate());
@@ -573,11 +517,10 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name,
MapHandles maps;
MaybeObjectHandles handlers;
- TargetMaps(&maps);
+ nexus()->ExtractMapsAndHandlers(&maps, &handlers);
int number_of_maps = static_cast<int>(maps.size());
int deprecated_maps = 0;
int handler_to_overwrite = -1;
- if (!nexus()->FindHandlers(&handlers, number_of_maps)) return false;
for (int i = 0; i < number_of_maps; i++) {
Handle<Map> current_map = maps.at(i);
@@ -642,9 +585,8 @@ void IC::UpdateMonomorphicIC(const MaybeObjectHandle& handler,
void IC::CopyICToMegamorphicCache(Handle<Name> name) {
MapHandles maps;
MaybeObjectHandles handlers;
- TargetMaps(&maps);
- if (!nexus()->FindHandlers(&handlers, static_cast<int>(maps.size()))) return;
- for (int i = 0; i < static_cast<int>(maps.size()); i++) {
+ nexus()->ExtractMapsAndHandlers(&maps, &handlers);
+ for (size_t i = 0; i < maps.size(); ++i) {
UpdateMegamorphicCache(maps.at(i), name, handlers.at(i));
}
}
@@ -706,8 +648,7 @@ void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) {
}
void LoadIC::UpdateCaches(LookupIterator* lookup) {
- if (!FLAG_lazy_feedback_allocation && state() == UNINITIALIZED &&
- !IsLoadGlobalIC()) {
+ if (state() == UNINITIALIZED && !IsLoadGlobalIC()) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
@@ -763,17 +704,6 @@ void IC::UpdateMegamorphicCache(Handle<Map> map, Handle<Name> name,
}
}
-void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
- DCHECK_EQ(LookupIterator::ACCESSOR, lookup->state());
- if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
- if (IsAnyLoad() || IsAnyHas()) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Accessor);
- } else {
- DCHECK(IsAnyStore());
- TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Accessor);
- }
-}
-
Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<Object> receiver = lookup->GetReceiver();
ReadOnlyRoots roots(isolate());
@@ -1035,7 +965,8 @@ bool KeyedLoadIC::CanChangeToAllowOutOfBounds(Handle<Map> receiver_map) {
void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
KeyedAccessLoadMode load_mode) {
Handle<Map> receiver_map(receiver->map(), isolate());
- DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE); // Checked by caller.
+ DCHECK(receiver_map->instance_type() !=
+ JS_PRIMITIVE_WRAPPER_TYPE); // Checked by caller.
MapHandles target_receiver_maps;
TargetMaps(&target_receiver_maps);
@@ -1046,8 +977,8 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
for (Handle<Map> map : target_receiver_maps) {
if (map.is_null()) continue;
- if (map->instance_type() == JS_VALUE_TYPE) {
- set_slow_stub_reason("JSValue");
+ if (map->instance_type() == JS_PRIMITIVE_WRAPPER_TYPE) {
+ set_slow_stub_reason("JSPrimitiveWrapper");
return;
}
if (map->instance_type() == JS_PROXY_TYPE) {
@@ -1091,7 +1022,8 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
- if (target_receiver_maps.size() > kMaxKeyedPolymorphism) {
+ if (static_cast<int>(target_receiver_maps.size()) >
+ FLAG_max_polymorphic_map_count) {
set_slow_stub_reason("max polymorph exceeded");
return;
}
@@ -1228,7 +1160,9 @@ namespace {
bool ConvertKeyToIndex(Handle<Object> receiver, Handle<Object> key,
uint32_t* index, InlineCacheState state) {
if (!FLAG_use_ic || state == NO_FEEDBACK) return false;
- if (receiver->IsAccessCheckNeeded() || receiver->IsJSValue()) return false;
+ if (receiver->IsAccessCheckNeeded() || receiver->IsJSPrimitiveWrapper()) {
+ return false;
+ }
// For regular JSReceiver or String receivers, the {key} must be a positive
// array index.
@@ -1299,7 +1233,7 @@ MaybeHandle<Object> KeyedLoadIC::RuntimeLoad(Handle<Object> object,
MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
Handle<Object> key) {
- if (MigrateDeprecated(object)) {
+ if (MigrateDeprecated(isolate(), object)) {
return RuntimeLoad(object, key);
}
@@ -1459,7 +1393,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
StoreOrigin store_origin) {
// TODO(verwaest): Let SetProperty do the migration, since storing a property
// might deprecate the current map again, if value does not fit.
- if (MigrateDeprecated(object)) {
+ if (MigrateDeprecated(isolate(), object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result, Object::SetProperty(isolate(), object, name, value),
@@ -1779,9 +1713,9 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
for (Handle<Map> map : target_receiver_maps) {
- if (!map.is_null() && map->instance_type() == JS_VALUE_TYPE) {
+ if (!map.is_null() && map->instance_type() == JS_PRIMITIVE_WRAPPER_TYPE) {
DCHECK(!IsStoreInArrayLiteralICKind(kind()));
- set_slow_stub_reason("JSValue");
+ set_slow_stub_reason("JSPrimitiveWrapper");
return;
}
}
@@ -1836,7 +1770,10 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// If the maximum number of receiver maps has been exceeded, use the
// megamorphic version of the IC.
- if (target_receiver_maps.size() > kMaxKeyedPolymorphism) return;
+ if (static_cast<int>(target_receiver_maps.size()) >
+ FLAG_max_polymorphic_map_count) {
+ return;
+ }
// Make sure all polymorphic handlers have the same store mode, otherwise the
// megamorphic stub must be used.
@@ -2001,13 +1938,9 @@ bool MayHaveTypedArrayInPrototypeChain(Handle<JSObject> object) {
KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver, uint32_t index) {
bool oob_access = IsOutOfBoundsAccess(receiver, index);
// Don't consider this a growing store if the store would send the receiver to
- // dictionary mode. Also make sure we don't consider this a growing store if
- // there's any JSTypedArray in the {receiver}'s prototype chain, since that
- // prototype is going to swallow all stores that are out-of-bounds for said
- // prototype, and we just let the runtime deal with the complexity of this.
+ // dictionary mode.
bool allow_growth = receiver->IsJSArray() && oob_access &&
- !receiver->WouldConvertToSlowElements(index) &&
- !MayHaveTypedArrayInPrototypeChain(receiver);
+ !receiver->WouldConvertToSlowElements(index);
if (allow_growth) {
return STORE_AND_GROW_HANDLE_COW;
}
@@ -2024,7 +1957,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> value) {
// TODO(verwaest): Let SetProperty do the migration, since storing a property
// might deprecate the current map again, if value does not fit.
- if (MigrateDeprecated(object)) {
+ if (MigrateDeprecated(isolate(), object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
@@ -2109,6 +2042,16 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (!old_receiver_map.is_null()) {
if (is_arguments) {
set_slow_stub_reason("arguments receiver");
+ } else if (object->IsJSArray() && IsGrowStoreMode(store_mode) &&
+ JSArray::HasReadOnlyLength(Handle<JSArray>::cast(object))) {
+ set_slow_stub_reason("array has read only length");
+ } else if (object->IsJSArray() && MayHaveTypedArrayInPrototypeChain(
+ Handle<JSObject>::cast(object))) {
+ // Make sure we don't handle this in IC if there's any JSTypedArray in
+ // the {receiver}'s prototype chain, since that prototype is going to
+ // swallow all stores that are out-of-bounds for said prototype, and we
+ // just let the runtime deal with the complexity of this.
+ set_slow_stub_reason("typed array in the prototype chain of an Array");
} else if (key_is_valid_index) {
if (old_receiver_map->is_abandoned_prototype_map()) {
set_slow_stub_reason("receiver with prototype map");
@@ -2160,7 +2103,8 @@ void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
DCHECK(!array->map().IsMapInArrayPrototypeChain(isolate()));
DCHECK(index->IsNumber());
- if (!FLAG_use_ic || state() == NO_FEEDBACK || MigrateDeprecated(array)) {
+ if (!FLAG_use_ic || state() == NO_FEEDBACK ||
+ MigrateDeprecated(isolate(), array)) {
StoreOwnElement(isolate(), array, index, value);
TraceIC("StoreInArrayLiteralIC", index);
return;
@@ -2585,10 +2529,9 @@ static bool CanFastCloneObject(Handle<Map> map) {
return true;
}
-static Handle<Map> FastCloneObjectMap(Isolate* isolate,
- Handle<HeapObject> source, int flags) {
- Handle<Map> source_map(source->map(), isolate);
- SLOW_DCHECK(source->IsNullOrUndefined() || CanFastCloneObject(source_map));
+static Handle<Map> FastCloneObjectMap(Isolate* isolate, Handle<Map> source_map,
+ int flags) {
+ SLOW_DCHECK(CanFastCloneObject(source_map));
Handle<JSFunction> constructor(isolate->native_context()->object_function(),
isolate);
DCHECK(constructor->has_initial_map());
@@ -2613,9 +2556,10 @@ static Handle<Map> FastCloneObjectMap(Isolate* isolate,
Map::SetPrototype(isolate, map, isolate->factory()->null_value());
}
- if (source->IsNullOrUndefined() || !source_map->NumberOfOwnDescriptors()) {
+ if (source_map->NumberOfOwnDescriptors() == 0) {
return map;
}
+ DCHECK(!source_map->IsNullOrUndefinedMap());
if (map.is_identical_to(initial_map)) {
map = Map::Copy(isolate, map, "InitializeClonedDescriptors");
@@ -2640,7 +2584,7 @@ static Handle<Map> FastCloneObjectMap(Isolate* isolate,
}
static MaybeHandle<JSObject> CloneObjectSlowPath(Isolate* isolate,
- Handle<HeapObject> source,
+ Handle<Object> source,
int flags) {
Handle<JSObject> new_object;
if (flags & ObjectLiteral::kHasNullPrototype) {
@@ -2664,35 +2608,31 @@ static MaybeHandle<JSObject> CloneObjectSlowPath(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_CloneObjectIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- Handle<HeapObject> source = args.at<HeapObject>(0);
+ Handle<Object> source = args.at<Object>(0);
int flags = args.smi_at(1);
- MigrateDeprecated(source);
-
- FeedbackSlot slot = FeedbackVector::ToSlot(args.smi_at(2));
- Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
- if (maybe_vector->IsUndefined()) {
- RETURN_RESULT_OR_FAILURE(isolate,
- CloneObjectSlowPath(isolate, source, flags));
- }
-
- DCHECK(maybe_vector->IsFeedbackVector());
- Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(maybe_vector);
-
- FeedbackNexus nexus(vector, slot);
- Handle<Map> source_map(source->map(), isolate);
+ if (!MigrateDeprecated(isolate, source)) {
+ FeedbackSlot slot = FeedbackVector::ToSlot(args.smi_at(2));
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
+ if (maybe_vector->IsFeedbackVector()) {
+ FeedbackNexus nexus(Handle<FeedbackVector>::cast(maybe_vector), slot);
+ if (!source->IsSmi() && !nexus.IsMegamorphic()) {
+ Handle<Map> source_map(Handle<HeapObject>::cast(source)->map(),
+ isolate);
+ if (CanFastCloneObject(source_map)) {
+ Handle<Map> target_map =
+ FastCloneObjectMap(isolate, source_map, flags);
+ nexus.ConfigureCloneObject(source_map, target_map);
+ return *target_map;
+ }
- if (!CanFastCloneObject(source_map) || nexus.IsMegamorphic()) {
- // Migrate to slow mode if needed.
- nexus.ConfigureMegamorphic();
- RETURN_RESULT_OR_FAILURE(isolate,
- CloneObjectSlowPath(isolate, source, flags));
+ nexus.ConfigureMegamorphic();
+ }
+ }
}
- Handle<Map> result_map = FastCloneObjectMap(isolate, source, flags);
- nexus.ConfigureCloneObject(source_map, result_map);
-
- return *result_map;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ CloneObjectSlowPath(isolate, source, flags));
}
RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
@@ -2718,46 +2658,6 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
return *value;
}
-RUNTIME_FUNCTION(Runtime_LoadCallbackProperty) {
- Handle<JSObject> receiver = args.at<JSObject>(0);
- Handle<JSObject> holder = args.at<JSObject>(1);
- Handle<AccessorInfo> info = args.at<AccessorInfo>(2);
- Handle<Name> name = args.at<Name>(3);
- HandleScope scope(isolate);
-
- DCHECK(info->IsCompatibleReceiver(*receiver));
-
- PropertyCallbackArguments custom_args(isolate, info->data(), *receiver,
- *holder, Just(kThrowOnError));
- Handle<Object> result = custom_args.CallAccessorGetter(info, name);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.is_null()) return ReadOnlyRoots(isolate).undefined_value();
- return *result;
-}
-
-RUNTIME_FUNCTION(Runtime_LoadAccessorProperty) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 3);
- Handle<JSObject> receiver = args.at<JSObject>(0);
- int handler_kind = args.smi_at(1);
- Handle<CallHandlerInfo> call_handler_info = args.at<CallHandlerInfo>(2);
-
- Object holder = *receiver;
- if (handler_kind == LoadHandler::kApiGetterHolderIsPrototype) {
- holder = receiver->map().prototype();
- } else {
- DCHECK_EQ(handler_kind, LoadHandler::kApiGetter);
- }
-
- // Call the accessor without additional arguments.
- FunctionCallbackArguments custom(isolate, call_handler_info->data(),
- *receiver, holder, HeapObject(), nullptr, 0);
- Handle<Object> result_handle = custom.Call(*call_handler_info);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- if (result_handle.is_null()) return ReadOnlyRoots(isolate).undefined_value();
- return *result_handle;
-}
-
/**
* Loads a property with an interceptor performing post interceptor
* lookup if interceptor failed.
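
Within ic.cc, the rewritten Runtime_CloneObjectIC_Miss keeps one decision tree: skip feedback when the source was just migrated or no feedback vector was passed, record a monomorphic clone handler when the source map can be fast-cloned, and otherwise mark the site megamorphic before falling back to the slow clone. A compact sketch of just the feedback decision, with hypothetical types (CloneSite/CloneFeedback are illustrative, not the FeedbackNexus API):

enum class CloneFeedback { kUninitialized, kMonomorphic, kMegamorphic };

struct CloneSite {
  CloneFeedback state = CloneFeedback::kUninitialized;
};

// Returns true when a fast-clone handler was installed; false means the
// caller should take the generic slow path.
bool OnCloneMiss(CloneSite* site, bool source_is_smi, bool can_fast_clone) {
  if (site == nullptr) return false;  // no feedback vector: slow path only
  if (!source_is_smi && site->state != CloneFeedback::kMegamorphic &&
      can_fast_clone) {
    site->state = CloneFeedback::kMonomorphic;  // remember this source map
    return true;  // the real code returns the fast-clone target map here
  }
  site->state = CloneFeedback::kMegamorphic;  // stop specializing this site
  return false;
}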
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 39462d55e5..29f3b4a60a 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -7,8 +7,8 @@
#include <vector>
+#include "src/common/message-template.h"
#include "src/execution/isolate.h"
-#include "src/execution/message-template.h"
#include "src/heap/factory.h"
#include "src/ic/stub-cache.h"
#include "src/objects/feedback-vector.h"
@@ -29,8 +29,6 @@ class IC {
// Alias the inline cache state type to make the IC code more readable.
using State = InlineCacheState;
- static constexpr int kMaxKeyedPolymorphism = 4;
-
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
@@ -62,25 +60,15 @@ class IC {
// Notify the IC system that feedback has changed.
static void OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
- FeedbackSlot slot, JSFunction host_function,
- const char* reason);
+ FeedbackSlot slot, const char* reason);
- static void OnFeedbackChanged(Isolate* isolate, FeedbackNexus* nexus,
- JSFunction host_function, const char* reason);
+ void OnFeedbackChanged(const char* reason);
protected:
- Address fp() const { return fp_; }
- Address pc() const { return *pc_address_; }
-
void set_slow_stub_reason(const char* reason) { slow_stub_reason_ = reason; }
Isolate* isolate() const { return isolate_; }
- // Get the caller function object.
- JSFunction GetHostFunction() const;
-
- inline bool HostIsDeoptimizedCode() const;
-
bool is_vector_set() { return vector_set_; }
inline bool vector_needs_update();
@@ -106,8 +94,6 @@ class IC {
Handle<Object> key);
MaybeHandle<Object> ReferenceError(Handle<Name> name);
- void TraceHandlerCacheHitStats(LookupIterator* lookup);
-
void UpdateMonomorphicIC(const MaybeObjectHandle& handler, Handle<Name> name);
bool UpdatePolymorphicIC(Handle<Name> name, const MaybeObjectHandle& handler);
void UpdateMegamorphicCache(Handle<Map> map, Handle<Name> name,
@@ -158,28 +144,12 @@ class IC {
FeedbackNexus* nexus() { return &nexus_; }
private:
- inline Address constant_pool() const;
- inline Address raw_constant_pool() const;
-
void FindTargetMaps() {
if (target_maps_set_) return;
target_maps_set_ = true;
nexus()->ExtractMaps(&target_maps_);
}
- // Frame pointer for the frame that uses (calls) the IC.
- Address fp_;
-
- // All access to the program counter and constant pool of an IC structure is
- // indirect to make the code GC safe. This feature is crucial since
- // GetProperty and SetProperty are called and they in turn might
- // invoke the garbage collector.
- Address* pc_address_;
-
- // The constant pool of the code which originally called the IC (which might
- // be for the breakpointed copy of the original code).
- Address* constant_pool_address_;
-
Isolate* isolate_;
bool vector_set_;
@@ -187,7 +157,6 @@ class IC {
State state_;
FeedbackSlotKind kind_;
Handle<Map> receiver_map_;
- MaybeObjectHandle maybe_handler_;
MapHandles target_maps_;
bool target_maps_set_;
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index f7e79ee6d7..7e87b015d4 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -185,7 +185,7 @@ void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
TNode<Int32T> instance_type = LoadMapInstanceType(prototype_map);
GotoIf(IsCustomElementsReceiverInstanceType(instance_type),
non_fast_elements);
- Node* elements_kind = LoadMapElementsKind(prototype_map);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(prototype_map);
GotoIf(IsFastElementsKind(elements_kind), &loop_body);
GotoIf(Word32Equal(elements_kind, Int32Constant(NO_ELEMENTS)), &loop_body);
Goto(non_fast_elements);
@@ -500,7 +500,7 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
if_grow(this), if_nonfast(this), if_typed_array(this),
if_dictionary(this);
Node* elements = LoadElements(receiver);
- Node* elements_kind = LoadMapElementsKind(receiver_map);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(receiver_map);
Branch(IsFastElementsKind(elements_kind), &if_fast, &if_nonfast);
BIND(&if_fast);
@@ -775,7 +775,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
Label descriptor_found(this), lookup_transition(this);
TVARIABLE(IntPtrT, var_name_index);
- DescriptorLookup(p->name, descriptors, bitfield3, &descriptor_found,
+ DescriptorLookup(p->name(), descriptors, bitfield3, &descriptor_found,
&var_name_index, &lookup_transition);
BIND(&descriptor_found);
@@ -801,18 +801,18 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&data_property);
{
- CheckForAssociatedProtector(p->name, slow);
+ CheckForAssociatedProtector(p->name(), slow);
OverwriteExistingFastDataProperty(receiver, receiver_map, descriptors,
- name_index, details, p->value, slow,
+ name_index, details, p->value(), slow,
false);
- exit_point->Return(p->value);
+ exit_point->Return(p->value());
}
}
BIND(&lookup_transition);
{
Comment("lookup transition");
TNode<Map> transition_map = FindCandidateStoreICTransitionMapHandler(
- receiver_map, CAST(p->name), slow);
+ receiver_map, CAST(p->name()), slow);
// Validate the transition handler candidate and apply the transition.
StoreTransitionMapFlags flags = kValidateTransitionHandler;
@@ -820,7 +820,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
flags = StoreTransitionMapFlags(flags | kCheckPrototypeValidity);
}
HandleStoreICTransitionMapHandlerCase(p, transition_map, slow, flags);
- exit_point->Return(p->value);
+ exit_point->Return(p->value());
}
}
@@ -833,7 +833,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index), not_found(this);
TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(receiver)));
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name),
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()),
&dictionary_found, &var_name_index,
&not_found);
BIND(&dictionary_found);
@@ -858,38 +858,47 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&overwrite);
{
- CheckForAssociatedProtector(p->name, slow);
+ CheckForAssociatedProtector(p->name(), slow);
StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
- p->value);
- exit_point->Return(p->value);
+ p->value());
+ exit_point->Return(p->value());
}
}
BIND(&not_found);
{
- CheckForAssociatedProtector(p->name, slow);
- Label extensible(this);
- Node* bitfield2 = LoadMapBitField2(receiver_map);
- GotoIf(IsPrivateSymbol(p->name), &extensible);
- Branch(IsSetWord32<Map::IsExtensibleBit>(bitfield2), &extensible, slow);
+ CheckForAssociatedProtector(p->name(), slow);
+ Label extensible(this), is_private_symbol(this);
+ Node* bitfield3 = LoadMapBitField3(receiver_map);
+ GotoIf(IsPrivateSymbol(p->name()), &is_private_symbol);
+ Branch(IsSetWord32<Map::IsExtensibleBit>(bitfield3), &extensible, slow);
+
+ BIND(&is_private_symbol);
+ {
+ CSA_ASSERT(this, IsPrivateSymbol(p->name()));
+ // For private names, we miss to the runtime which will throw.
+ // For private symbols, we extend and store an own property.
+ Branch(IsPrivateName(p->name()), slow, &extensible);
+ }
BIND(&extensible);
if (ShouldCheckPrototype()) {
DCHECK(ShouldCallSetter());
LookupPropertyOnPrototypeChain(
- receiver_map, p->name, &accessor, &var_accessor_pair,
+ receiver_map, p->name(), &accessor, &var_accessor_pair,
&var_accessor_holder,
ShouldReconfigureExisting() ? nullptr : &readonly, slow);
}
Label add_dictionary_property_slow(this);
- InvalidateValidityCellIfPrototype(receiver_map, bitfield2);
- Add<NameDictionary>(properties, CAST(p->name), p->value,
+ InvalidateValidityCellIfPrototype(receiver_map, bitfield3);
+ Add<NameDictionary>(properties, CAST(p->name()), p->value(),
&add_dictionary_property_slow);
- exit_point->Return(p->value);
+ exit_point->Return(p->value());
BIND(&add_dictionary_property_slow);
- exit_point->ReturnCallRuntime(Runtime::kAddDictionaryProperty, p->context,
- p->receiver, p->name, p->value);
+ exit_point->ReturnCallRuntime(Runtime::kAddDictionaryProperty,
+ p->context(), p->receiver(), p->name(),
+ p->value());
}
}
@@ -908,8 +917,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
GotoIfNot(IsCallableMap(setter_map), &not_callable);
Callable callable = CodeFactory::Call(isolate());
- CallJS(callable, p->context, setter, receiver, p->value);
- exit_point->Return(p->value);
+ CallJS(callable, p->context(), setter, receiver, p->value());
+ exit_point->Return(p->value());
BIND(&not_callable);
{
@@ -917,17 +926,17 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
if (maybe_language_mode.To(&language_mode)) {
if (language_mode == LanguageMode::kStrict) {
exit_point->ReturnCallRuntime(
- Runtime::kThrowTypeError, p->context,
- SmiConstant(MessageTemplate::kNoSetterInCallback), p->name,
+ Runtime::kThrowTypeError, p->context(),
+ SmiConstant(MessageTemplate::kNoSetterInCallback), p->name(),
var_accessor_holder.value());
} else {
- exit_point->Return(p->value);
+ exit_point->Return(p->value());
}
} else {
- CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context,
+ CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context(),
SmiConstant(MessageTemplate::kNoSetterInCallback),
- p->name, var_accessor_holder.value());
- exit_point->Return(p->value);
+ p->name(), var_accessor_holder.value());
+ exit_point->Return(p->value());
}
}
}
@@ -939,17 +948,17 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
LanguageMode language_mode;
if (maybe_language_mode.To(&language_mode)) {
if (language_mode == LanguageMode::kStrict) {
- Node* type = Typeof(p->receiver);
- ThrowTypeError(p->context, MessageTemplate::kStrictReadOnlyProperty,
- p->name, type, p->receiver);
+ Node* type = Typeof(p->receiver());
+ ThrowTypeError(p->context(), MessageTemplate::kStrictReadOnlyProperty,
+ p->name(), type, p->receiver());
} else {
- exit_point->Return(p->value);
+ exit_point->Return(p->value());
}
} else {
- CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context,
+ CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context(),
SmiConstant(MessageTemplate::kStrictReadOnlyProperty),
- p->name, Typeof(p->receiver), p->receiver);
- exit_point->Return(p->value);
+ p->name(), Typeof(p->receiver()), p->receiver());
+ exit_point->Return(p->value());
}
}
}
@@ -1062,7 +1071,7 @@ void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
BIND(&store_property);
{
- StoreICParameters p(context, receiver, name, value, slot, vector);
+ StoreICParameters p(CAST(context), receiver, name, value, slot, vector);
EmitGenericPropertyStore(receiver, receiver_map, &p, &miss);
}
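
In the generic store above, the not-found path now reads the extensibility bit from bit field 3 and splits private symbols into two cases: private names miss to the runtime (which throws), while other private symbols are added as own properties without the extensibility check. A small sketch of that branch with hypothetical types (Key/StoreAction are illustrative, not the CSA predicates):

struct Key {
  bool is_private_symbol = false;
  bool is_private_name = false;  // private names are a subset of private symbols
};

enum class StoreAction { kAddOwnProperty, kTakeSlowPath };

// Decide how a store proceeds when the property was not found on the receiver.
StoreAction StoreNotFound(const Key& key, bool map_is_extensible) {
  if (key.is_private_symbol) {
    // Private names must reach the runtime so it can throw; other private
    // symbols simply become own properties, skipping the extensibility check.
    return key.is_private_name ? StoreAction::kTakeSlowPath
                               : StoreAction::kAddOwnProperty;
  }
  return map_is_extensible ? StoreAction::kAddOwnProperty
                           : StoreAction::kTakeSlowPath;
}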
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index bdac1ce334..04381bf693 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -9,6 +9,7 @@
#include "src/heap/heap-inl.h" // For InYoungGeneration().
#include "src/ic/ic-inl.h"
#include "src/logging/counters.h"
+#include "src/objects/tagged-value-inl.h"
namespace v8 {
namespace internal {
@@ -85,25 +86,28 @@ void StubCache::Set(Name name, Map map, MaybeObject handler) {
// Compute the primary entry.
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
- MaybeObject old_handler(primary->value);
-
+ MaybeObject old_handler(
+ TaggedValue::ToMaybeObject(isolate(), primary->value));
// If the primary entry has useful data in it, we retire it to the
// secondary cache before overwriting it.
if (old_handler != MaybeObject::FromObject(
- isolate_->builtins()->builtin(Builtins::kIllegal)) &&
- primary->map != kNullAddress) {
- Map old_map = Map::cast(Object(primary->map));
- int seed = PrimaryOffset(Name::cast(Object(primary->key)), old_map);
- int secondary_offset =
- SecondaryOffset(Name::cast(Object(primary->key)), seed);
+ isolate()->builtins()->builtin(Builtins::kIllegal)) &&
+ !primary->map.IsSmi()) {
+ Map old_map =
+ Map::cast(StrongTaggedValue::ToObject(isolate(), primary->map));
+ int seed = PrimaryOffset(
+ Name::cast(StrongTaggedValue::ToObject(isolate(), primary->key)),
+ old_map);
+ int secondary_offset = SecondaryOffset(
+ Name::cast(StrongTaggedValue::ToObject(isolate(), primary->key)), seed);
Entry* secondary = entry(secondary_, secondary_offset);
*secondary = *primary;
}
// Update primary cache.
- primary->key = name.ptr();
- primary->value = handler.ptr();
- primary->map = map.ptr();
+ primary->key = StrongTaggedValue(name);
+ primary->value = TaggedValue(handler);
+ primary->map = StrongTaggedValue(map);
isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
}
@@ -111,13 +115,13 @@ MaybeObject StubCache::Get(Name name, Map map) {
DCHECK(CommonStubCacheChecks(this, name, map, MaybeObject()));
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
- if (primary->key == name.ptr() && primary->map == map.ptr()) {
- return MaybeObject(primary->value);
+ if (primary->key == name && primary->map == map) {
+ return TaggedValue::ToMaybeObject(isolate(), primary->value);
}
int secondary_offset = SecondaryOffset(name, primary_offset);
Entry* secondary = entry(secondary_, secondary_offset);
- if (secondary->key == name.ptr() && secondary->map == map.ptr()) {
- return MaybeObject(secondary->value);
+ if (secondary->key == name && secondary->map == map) {
+ return TaggedValue::ToMaybeObject(isolate(), secondary->value);
}
return MaybeObject();
}
@@ -127,14 +131,14 @@ void StubCache::Clear() {
isolate_->builtins()->builtin(Builtins::kIllegal));
Name empty_string = ReadOnlyRoots(isolate()).empty_string();
for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = empty_string.ptr();
- primary_[i].map = kNullAddress;
- primary_[i].value = empty.ptr();
+ primary_[i].key = StrongTaggedValue(empty_string);
+ primary_[i].map = StrongTaggedValue(Smi::zero());
+ primary_[i].value = TaggedValue(empty);
}
for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = empty_string.ptr();
- secondary_[j].map = kNullAddress;
- secondary_[j].value = empty.ptr();
+ secondary_[j].key = StrongTaggedValue(empty_string);
+ secondary_[j].map = StrongTaggedValue(Smi::zero());
+ secondary_[j].value = TaggedValue(empty);
}
}
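
StubCache keeps its two-level scheme — a useful primary entry is retired to the secondary table before being overwritten — but the entries now hold StrongTaggedValue/TaggedValue instead of raw Addresses (see stub-cache.h below), presumably to keep the table layout compatible with pointer compression. A simplified standalone sketch of the retire-to-secondary update, with toy hash functions standing in for V8's offset computations:

#include <array>
#include <cstdint>
#include <functional>
#include <string>

// Illustrative two-way cache: on a primary collision the old entry is moved
// to a secondary table instead of being dropped outright.
struct Entry { std::string key; uintptr_t map = 0; uintptr_t handler = 0; };

class TwoLevelCache {
 public:
  void Set(const std::string& key, uintptr_t map, uintptr_t handler) {
    Entry& primary = primary_[PrimaryOffset(key, map)];
    if (primary.map != 0) {
      // Retire the displaced entry to the secondary table first.
      secondary_[SecondaryOffset(primary.key)] = primary;
    }
    primary = Entry{key, map, handler};
  }

  uintptr_t Get(const std::string& key, uintptr_t map) const {
    const Entry& p = primary_[PrimaryOffset(key, map)];
    if (p.key == key && p.map == map) return p.handler;
    const Entry& s = secondary_[SecondaryOffset(key)];
    if (s.key == key && s.map == map) return s.handler;
    return 0;  // miss
  }

 private:
  static size_t PrimaryOffset(const std::string& key, uintptr_t map) {
    return (std::hash<std::string>{}(key) ^ map) % kSize;
  }
  static size_t SecondaryOffset(const std::string& key) {
    return std::hash<std::string>{}(key) % kSize;
  }
  static constexpr size_t kSize = 128;
  std::array<Entry, kSize> primary_;
  std::array<Entry, kSize> secondary_;
};

The retirement step gives a displaced handler one more chance to hit before a full miss has to rebuild it.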
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 61318245e6..87acc0e007 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -6,6 +6,7 @@
#define V8_IC_STUB_CACHE_H_
#include "src/objects/name.h"
+#include "src/objects/tagged-value.h"
namespace v8 {
namespace internal {
@@ -31,15 +32,14 @@ class SCTableReference {
class V8_EXPORT_PRIVATE StubCache {
public:
struct Entry {
- // The values here have plain Address types because they are read
- // directly from generated code. As a nice side effect, this keeps
- // #includes lightweight.
- Address key;
+ // {key} is a tagged Name pointer, may be cleared by setting to empty
+ // string.
+ StrongTaggedValue key;
// {value} is a tagged heap object reference (weak or strong), equivalent
// to a MaybeObject's payload.
- Address value;
- // {map} is a tagged Map pointer, or nullptr.
- Address map;
+ TaggedValue value;
+ // {map} is a tagged Map pointer, may be cleared by setting to Smi::zero().
+ StrongTaggedValue map;
};
void Initialize();
diff --git a/deps/v8/src/init/OWNERS b/deps/v8/src/init/OWNERS
index c5a41de1fd..aa006edd35 100644
--- a/deps/v8/src/init/OWNERS
+++ b/deps/v8/src/init/OWNERS
@@ -1,5 +1,14 @@
ahaas@chromium.org
bmeurer@chromium.org
-jkummerow@chromium.org
+ftang@chromium.org
+gsathya@chromium.org
+ishell@chromium.org
jgruber@chromium.org
+jkummerow@chromium.org
+marja@chromium.org
+mathias@chromium.org
+ulan@chromium.org
+verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index a080f8cdf0..176749781c 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -12,6 +12,7 @@
#include "src/debug/debug.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/microtask-queue.h"
+#include "src/extensions/cputracemark-extension.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
#include "src/extensions/gc-extension.h"
@@ -123,6 +124,11 @@ static const char* GCFunctionName() {
return flag_given ? FLAG_expose_gc_as : "gc";
}
+static bool isValidCpuTraceMarkFunctionName() {
+ return FLAG_expose_cputracemark_as != nullptr &&
+ strlen(FLAG_expose_cputracemark_as) != 0;
+}
+
void Bootstrapper::InitializeOncePerProcess() {
v8::RegisterExtension(v8::base::make_unique<FreeBufferExtension>());
v8::RegisterExtension(v8::base::make_unique<GCExtension>(GCFunctionName()));
@@ -130,6 +136,10 @@ void Bootstrapper::InitializeOncePerProcess() {
v8::RegisterExtension(v8::base::make_unique<StatisticsExtension>());
v8::RegisterExtension(v8::base::make_unique<TriggerFailureExtension>());
v8::RegisterExtension(v8::base::make_unique<IgnitionStatisticsExtension>());
+ if (isValidCpuTraceMarkFunctionName()) {
+ v8::RegisterExtension(v8::base::make_unique<CpuTraceMarkExtension>(
+ FLAG_expose_cputracemark_as));
+ }
}
void Bootstrapper::TearDown() {
@@ -343,7 +353,7 @@ void Bootstrapper::LogAllMaps() {
void Bootstrapper::DetachGlobal(Handle<Context> env) {
isolate_->counters()->errors_thrown_per_context()->AddSample(
- env->GetErrorsThrown());
+ env->native_context().GetErrorsThrown());
ReadOnlyRoots roots(isolate_);
Handle<JSGlobalProxy> global_proxy(env->global_proxy(), isolate_);
@@ -1242,7 +1252,6 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
JS_GLOBAL_PROXY_TYPE);
}
global_proxy_function->initial_map().set_is_access_check_needed(true);
- global_proxy_function->initial_map().set_has_hidden_prototype(true);
global_proxy_function->initial_map().set_may_have_interesting_symbols(true);
native_context()->set_global_proxy_function(*global_proxy_function);
@@ -1424,7 +1433,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, object_function, "is", Builtins::kObjectIs,
2, true);
SimpleInstallFunction(isolate_, object_function, "preventExtensions",
- Builtins::kObjectPreventExtensions, 1, false);
+ Builtins::kObjectPreventExtensions, 1, true);
SimpleInstallFunction(isolate_, object_function, "seal",
Builtins::kObjectSeal, 1, false);
@@ -1432,41 +1441,34 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, object_function, "create", Builtins::kObjectCreate, 2, false);
native_context()->set_object_create(*object_create);
- Handle<JSFunction> object_define_properties =
- SimpleInstallFunction(isolate_, object_function, "defineProperties",
- Builtins::kObjectDefineProperties, 2, true);
- native_context()->set_object_define_properties(*object_define_properties);
+ SimpleInstallFunction(isolate_, object_function, "defineProperties",
+ Builtins::kObjectDefineProperties, 2, true);
- Handle<JSFunction> object_define_property =
- SimpleInstallFunction(isolate_, object_function, "defineProperty",
- Builtins::kObjectDefineProperty, 3, true);
- native_context()->set_object_define_property(*object_define_property);
+ SimpleInstallFunction(isolate_, object_function, "defineProperty",
+ Builtins::kObjectDefineProperty, 3, true);
SimpleInstallFunction(isolate_, object_function, "freeze",
Builtins::kObjectFreeze, 1, false);
- Handle<JSFunction> object_get_prototype_of =
- SimpleInstallFunction(isolate_, object_function, "getPrototypeOf",
- Builtins::kObjectGetPrototypeOf, 1, false);
- native_context()->set_object_get_prototype_of(*object_get_prototype_of);
+ SimpleInstallFunction(isolate_, object_function, "getPrototypeOf",
+ Builtins::kObjectGetPrototypeOf, 1, true);
SimpleInstallFunction(isolate_, object_function, "setPrototypeOf",
- Builtins::kObjectSetPrototypeOf, 2, false);
+ Builtins::kObjectSetPrototypeOf, 2, true);
SimpleInstallFunction(isolate_, object_function, "isExtensible",
- Builtins::kObjectIsExtensible, 1, false);
+ Builtins::kObjectIsExtensible, 1, true);
SimpleInstallFunction(isolate_, object_function, "isFrozen",
Builtins::kObjectIsFrozen, 1, false);
- Handle<JSFunction> object_is_sealed =
- SimpleInstallFunction(isolate_, object_function, "isSealed",
- Builtins::kObjectIsSealed, 1, false);
- native_context()->set_object_is_sealed(*object_is_sealed);
+ SimpleInstallFunction(isolate_, object_function, "isSealed",
+ Builtins::kObjectIsSealed, 1, false);
- Handle<JSFunction> object_keys = SimpleInstallFunction(
- isolate_, object_function, "keys", Builtins::kObjectKeys, 1, true);
- native_context()->set_object_keys(*object_keys);
+ SimpleInstallFunction(isolate_, object_function, "keys",
+ Builtins::kObjectKeys, 1, true);
SimpleInstallFunction(isolate_, object_function, "entries",
Builtins::kObjectEntries, 1, true);
+ SimpleInstallFunction(isolate_, object_function, "fromEntries",
+ Builtins::kObjectFromEntries, 1, false);
SimpleInstallFunction(isolate_, object_function, "values",
Builtins::kObjectValues, 1, true);
@@ -1637,7 +1639,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallSpeciesGetter(isolate_, array_function);
// Cache the array maps, needed by ArrayConstructorStub
- CacheInitialJSArrayMaps(native_context(), initial_map);
+ CacheInitialJSArrayMaps(isolate_, native_context(), initial_map);
// Set up %ArrayPrototype%.
// The %ArrayPrototype% has TERMINAL_FAST_ELEMENTS_KIND in order to ensure
@@ -1648,10 +1650,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSFunction::SetPrototype(array_function, proto);
native_context()->set_initial_array_prototype(*proto);
- Handle<JSFunction> is_arraylike = SimpleInstallFunction(
- isolate_, array_function, "isArray", Builtins::kArrayIsArray, 1, true);
- native_context()->set_is_arraylike(*is_arraylike);
-
+ SimpleInstallFunction(isolate_, array_function, "isArray",
+ Builtins::kArrayIsArray, 1, true);
SimpleInstallFunction(isolate_, array_function, "from",
Builtins::kArrayFrom, 1, false);
SimpleInstallFunction(isolate_, array_function, "of", Builtins::kArrayOf, 0,
@@ -1786,15 +1786,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- N u m b e r ---
Handle<JSFunction> number_fun = InstallFunction(
- isolate_, global, "Number", JS_VALUE_TYPE, JSValue::kSize, 0,
- isolate_->initial_object_prototype(), Builtins::kNumberConstructor);
+ isolate_, global, "Number", JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, isolate_->initial_object_prototype(),
+ Builtins::kNumberConstructor);
number_fun->shared().DontAdaptArguments();
number_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, number_fun,
Context::NUMBER_FUNCTION_INDEX);
// Create the %NumberPrototype%
- Handle<JSValue> prototype = Handle<JSValue>::cast(
+ Handle<JSPrimitiveWrapper> prototype = Handle<JSPrimitiveWrapper>::cast(
factory->NewJSObject(number_fun, AllocationType::kOld));
prototype->set_value(Smi::kZero);
JSFunction::SetPrototype(number_fun, prototype);
@@ -1869,15 +1870,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- B o o l e a n ---
Handle<JSFunction> boolean_fun = InstallFunction(
- isolate_, global, "Boolean", JS_VALUE_TYPE, JSValue::kSize, 0,
- isolate_->initial_object_prototype(), Builtins::kBooleanConstructor);
+ isolate_, global, "Boolean", JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, isolate_->initial_object_prototype(),
+ Builtins::kBooleanConstructor);
boolean_fun->shared().DontAdaptArguments();
boolean_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, boolean_fun,
Context::BOOLEAN_FUNCTION_INDEX);
// Create the %BooleanPrototype%
- Handle<JSValue> prototype = Handle<JSValue>::cast(
+ Handle<JSPrimitiveWrapper> prototype = Handle<JSPrimitiveWrapper>::cast(
factory->NewJSObject(boolean_fun, AllocationType::kOld));
prototype->set_value(ReadOnlyRoots(isolate_).false_value());
JSFunction::SetPrototype(boolean_fun, prototype);
@@ -1895,8 +1897,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- S t r i n g ---
Handle<JSFunction> string_fun = InstallFunction(
- isolate_, global, "String", JS_VALUE_TYPE, JSValue::kSize, 0,
- isolate_->initial_object_prototype(), Builtins::kStringConstructor);
+ isolate_, global, "String", JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, isolate_->initial_object_prototype(),
+ Builtins::kStringConstructor);
string_fun->shared().DontAdaptArguments();
string_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, string_fun,
@@ -1929,7 +1932,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
false);
// Create the %StringPrototype%
- Handle<JSValue> prototype = Handle<JSValue>::cast(
+ Handle<JSPrimitiveWrapper> prototype = Handle<JSPrimitiveWrapper>::cast(
factory->NewJSObject(string_fun, AllocationType::kOld));
prototype->set_value(ReadOnlyRoots(isolate_).empty_string());
JSFunction::SetPrototype(string_fun, prototype);
@@ -2090,9 +2093,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // --- S y m b o l ---
- Handle<JSFunction> symbol_fun = InstallFunction(
- isolate_, global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, 0,
- factory->the_hole_value(), Builtins::kSymbolConstructor);
+ Handle<JSFunction> symbol_fun =
+ InstallFunction(isolate_, global, "Symbol", JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, factory->the_hole_value(),
+ Builtins::kSymbolConstructor);
symbol_fun->shared().set_length(0);
symbol_fun->shared().DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
@@ -2592,6 +2596,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<RegExpMatchInfo> last_match_info = factory->NewRegExpMatchInfo();
native_context()->set_regexp_last_match_info(*last_match_info);
+ // Install the species protector cell.
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ native_context()->set_regexp_species_protector(*cell);
+ }
+
// Force the RegExp constructor to fast properties, so that we can use the
// fast paths for various things like
//
@@ -2691,6 +2703,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<EmbedderDataArray> embedder_data = factory->NewEmbedderDataArray(0);
native_context()->set_embedder_data(*embedder_data);
+ { // -- g l o b a l T h i s
+ Handle<JSGlobalProxy> global_proxy(native_context()->global_proxy(),
+ isolate_);
+ JSObject::AddProperty(isolate_, global, factory->globalThis_string(),
+ global_proxy, DONT_ENUM);
+ }
+
{ // -- J S O N
Handle<JSObject> json_object =
factory->NewJSObject(isolate_->object_function(), AllocationType::kOld);
@@ -3393,9 +3412,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- B i g I n t
- Handle<JSFunction> bigint_fun = InstallFunction(
- isolate_, global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
- factory->the_hole_value(), Builtins::kBigIntConstructor);
+ Handle<JSFunction> bigint_fun =
+ InstallFunction(isolate_, global, "BigInt", JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, factory->the_hole_value(),
+ Builtins::kBigIntConstructor);
bigint_fun->shared().DontAdaptArguments();
bigint_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, bigint_fun,
@@ -3642,15 +3662,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewJSObject(isolate_->object_function(), AllocationType::kOld);
JSObject::AddProperty(isolate_, global, reflect_string, reflect, DONT_ENUM);
- Handle<JSFunction> define_property =
SimpleInstallFunction(isolate_, reflect, "defineProperty",
Builtins::kReflectDefineProperty, 3, true);
- native_context()->set_reflect_define_property(*define_property);
- Handle<JSFunction> delete_property =
SimpleInstallFunction(isolate_, reflect, "deleteProperty",
Builtins::kReflectDeleteProperty, 2, true);
- native_context()->set_reflect_delete_property(*delete_property);
Handle<JSFunction> apply = SimpleInstallFunction(
isolate_, reflect, "apply", Builtins::kReflectApply, 3, false);
@@ -4232,28 +4248,20 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_separator)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_hashbang)
#ifdef V8_INTL_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_add_calendar_numbering_system)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_bigint)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_day_period)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(
+ harmony_intl_dateformat_fractional_second_digits)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_quarter)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_datetime_style)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_numberformat_unified)
#endif // V8_INTL_SUPPORT
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
-void Genesis::InitializeGlobal_harmony_global() {
- if (!FLAG_harmony_global) return;
-
- Factory* factory = isolate()->factory();
- Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
- Handle<JSGlobalProxy> global_proxy(native_context()->global_proxy(),
- isolate());
- JSObject::AddProperty(isolate_, global, factory->globalThis_string(),
- global_proxy, DONT_ENUM);
-}
-
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
@@ -4285,6 +4293,9 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
isolate(), finalization_group_name, JS_FINALIZATION_GROUP_TYPE,
JSFinalizationGroup::kSize, 0, finalization_group_prototype,
Builtins::kFinalizationGroupConstructor);
+ InstallWithIntrinsicDefaultProto(
+ isolate(), finalization_group_fun,
+ Context::JS_FINALIZATION_GROUP_FUNCTION_INDEX);
finalization_group_fun->shared().DontAdaptArguments();
finalization_group_fun->shared().set_length(1);
@@ -4301,7 +4312,7 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
finalization_group_fun, DONT_ENUM);
SimpleInstallFunction(isolate(), finalization_group_prototype, "register",
- Builtins::kFinalizationGroupRegister, 3, false);
+ Builtins::kFinalizationGroupRegister, 2, false);
SimpleInstallFunction(isolate(), finalization_group_prototype, "unregister",
Builtins::kFinalizationGroupUnregister, 1, false);
@@ -4315,7 +4326,6 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
Handle<Map> weak_ref_map =
factory->NewMap(JS_WEAK_REF_TYPE, JSWeakRef::kSize);
DCHECK(weak_ref_map->IsJSObjectMap());
- native_context()->set_js_weak_ref_map(*weak_ref_map);
Handle<JSObject> weak_ref_prototype = factory->NewJSObject(
isolate()->object_function(), AllocationType::kOld);
@@ -4332,6 +4342,8 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
Handle<JSFunction> weak_ref_fun = CreateFunction(
isolate(), weak_ref_name, JS_WEAK_REF_TYPE, JSWeakRef::kSize, 0,
weak_ref_prototype, Builtins::kWeakRefConstructor);
+ InstallWithIntrinsicDefaultProto(isolate(), weak_ref_fun,
+ Context::JS_WEAK_REF_FUNCTION_INDEX);
weak_ref_fun->shared().DontAdaptArguments();
weak_ref_fun->shared().set_length(1);
@@ -4355,7 +4367,7 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
JSObject::ForceSetPrototype(cleanup_iterator_prototype, iterator_prototype);
InstallToStringTag(isolate(), cleanup_iterator_prototype,
- "JSFinalizationGroupCleanupIterator");
+ "FinalizationGroup Cleanup Iterator");
SimpleInstallFunction(isolate(), cleanup_iterator_prototype, "next",
Builtins::kFinalizationGroupCleanupIteratorNext, 0,
@@ -4498,12 +4510,6 @@ void Genesis::InitializeGlobal_harmony_intl_segmenter() {
#endif // V8_INTL_SUPPORT
-void Genesis::InitializeGlobal_harmony_object_from_entries() {
- if (!FLAG_harmony_object_from_entries) return;
- SimpleInstallFunction(isolate(), isolate()->object_function(), "fromEntries",
- Builtins::kObjectFromEntries, 1, false);
-}
-
Handle<JSFunction> Genesis::CreateArrayBuffer(
Handle<String> name, ArrayBufferKind array_buffer_kind) {
// Create the %ArrayBufferPrototype%
@@ -4598,7 +4604,7 @@ void Genesis::InstallInternalPackedArray(Handle<JSObject> target,
}
JSObject::NormalizeProperties(
- prototype, KEEP_INOBJECT_PROPERTIES, 6,
+ isolate(), prototype, KEEP_INOBJECT_PROPERTIES, 6,
"OptimizeInternalPackedArrayPrototypeForAdding");
InstallInternalPackedArrayFunction(prototype, "push");
InstallInternalPackedArrayFunction(prototype, "pop");
@@ -4681,14 +4687,14 @@ bool Genesis::InstallNatives() {
"Bootstrapping");
{
- // Builtin function for OpaqueReference -- a JSValue-based object,
- // that keeps its field isolated from JavaScript code. It may store
+ // Builtin function for OpaqueReference -- a JSPrimitiveWrapper-based
+ // object, that keeps its field isolated from JavaScript code. It may store
// objects, that JavaScript code may not access.
Handle<JSObject> prototype = factory()->NewJSObject(
isolate()->object_function(), AllocationType::kOld);
- Handle<JSFunction> opaque_reference_fun =
- CreateFunction(isolate(), factory()->empty_string(), JS_VALUE_TYPE,
- JSValue::kSize, 0, prototype, Builtins::kIllegal);
+ Handle<JSFunction> opaque_reference_fun = CreateFunction(
+ isolate(), factory()->empty_string(), JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, prototype, Builtins::kIllegal);
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
@@ -5080,6 +5086,8 @@ bool Genesis::InstallExtensions(Isolate* isolate,
(!FLAG_trace_ignition_dispatches ||
InstallExtension(isolate, "v8/ignition-statistics",
&extension_states)) &&
+ (!isValidCpuTraceMarkFunctionName() ||
+ InstallExtension(isolate, "v8/cpumark", &extension_states)) &&
InstallRequestedExtensions(isolate, extensions, &extension_states);
}
@@ -5516,7 +5524,6 @@ Genesis::Genesis(Isolate* isolate,
Handle<Map> global_proxy_map = isolate->factory()->NewMap(
JS_GLOBAL_PROXY_TYPE, proxy_size, TERMINAL_FAST_ELEMENTS_KIND);
global_proxy_map->set_is_access_check_needed(true);
- global_proxy_map->set_has_hidden_prototype(true);
global_proxy_map->set_may_have_interesting_symbols(true);
// A remote global proxy has no native context.
@@ -5525,9 +5532,6 @@ Genesis::Genesis(Isolate* isolate,
// Configure the hidden prototype chain of the global proxy.
JSObject::ForceSetPrototype(global_proxy, global_object);
global_proxy->map().SetConstructor(*global_constructor);
- // TODO(dcheng): This is a hack. Why does this need to be manually called
- // here? Line 4812 should have taken care of it?
- global_proxy->map().set_has_hidden_prototype(true);
global_proxy_ = global_proxy;
}
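
Note: as context for the weak-refs hunks above (the "register" builtin now declares a length of 2, and the cleanup iterator gets the "FinalizationGroup Cleanup Iterator" toString tag), here is a rough JavaScript sketch of the surface they install. It assumes --harmony-weak-refs at this V8 version; FinalizationGroup is the proposal-era name that was later renamed FinalizationRegistry, so treat this as illustrative rather than a stable API.

    // Requires --harmony-weak-refs (V8 7.7 era).
    const collected = [];
    const fg = new FinalizationGroup((holdings) => {
      // holdings is a "FinalizationGroup Cleanup Iterator" (see toString tag above).
      for (const h of holdings) collected.push(h);
    });

    let target = { buf: new ArrayBuffer(1024) };
    const token = {};
    // Two required arguments (target, holdings), matching the length of 2
    // installed above; the unregister token is optional.
    fg.register(target, 'target was collected', token);
    fg.unregister(token);  // cancel the registration via the token

    const ref = new WeakRef(target);
    target = null;  // after GC, ref.deref() may return undefined
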
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index 2293dc67d7..ce5a4f1a8b 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -29,13 +29,15 @@
V(_, endRange_string, "endRange") \
V(_, engineering_string, "engineering") \
V(_, era_string, "era") \
- V(_, first_string, "first") \
- V(_, format_string, "format") \
- V(_, except_zero_string, "except-zero") \
+ V(_, exceptZero_string, "exceptZero") \
V(_, exponentInteger_string, "exponentInteger") \
V(_, exponentMinusSign_string, "exponentMinusSign") \
V(_, exponentSeparator_string, "exponentSeparator") \
+ V(_, first_string, "first") \
+ V(_, format_string, "format") \
V(_, fraction_string, "fraction") \
+ V(_, fractionalSecond_string, "fractionalSecond") \
+ V(_, fractionalSecondDigits_string, "fractionalSecondDigits") \
V(_, full_string, "full") \
V(_, granularity_string, "granularity") \
V(_, grapheme_string, "grapheme") \
@@ -68,7 +70,7 @@
V(_, minute_string, "minute") \
V(_, month_string, "month") \
V(_, nan_string, "nan") \
- V(_, narrow_symbol_string, "narrow-symbol") \
+ V(_, narrowSymbol_string, "narrowSymbol") \
V(_, never_string, "never") \
V(_, none_string, "none") \
V(_, notation_string, "notation") \
@@ -414,10 +416,12 @@
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
F(MC_EVACUATE_COPY) \
+ F(MC_EVACUATE_COPY_PARALLEL) \
F(MC_EVACUATE_EPILOGUE) \
F(MC_EVACUATE_PROLOGUE) \
F(MC_EVACUATE_REBALANCE) \
F(MC_EVACUATE_UPDATE_POINTERS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_PARALLEL) \
F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN) \
F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
@@ -445,15 +449,18 @@
F(MINOR_MC_EVACUATE) \
F(MINOR_MC_EVACUATE_CLEAN_UP) \
F(MINOR_MC_EVACUATE_COPY) \
+ F(MINOR_MC_EVACUATE_COPY_PARALLEL) \
F(MINOR_MC_EVACUATE_EPILOGUE) \
F(MINOR_MC_EVACUATE_PROLOGUE) \
F(MINOR_MC_EVACUATE_REBALANCE) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MINOR_MC_MARK) \
F(MINOR_MC_MARK_GLOBAL_HANDLES) \
+ F(MINOR_MC_MARK_PARALLEL) \
F(MINOR_MC_MARK_SEED) \
F(MINOR_MC_MARK_ROOTS) \
F(MINOR_MC_MARK_WEAK) \
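
Note: the Intl-related strings added above (exceptZero, narrowSymbol, fractionalSecond, fractionalSecondDigits) back option values and formatToParts part types for the staged features whose EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE flags appear earlier in this patch. A hedged JavaScript sketch of where they surface, assuming the corresponding --harmony-intl-* flags are enabled; the option names follow the proposals of this era.

    // --harmony-intl-numberformat-unified
    new Intl.NumberFormat('en', {
      style: 'currency',
      currency: 'EUR',
      currencyDisplay: 'narrowSymbol',  // "narrowSymbol"
      signDisplay: 'exceptZero',        // "exceptZero"
    }).format(-1234.5);

    // --harmony-intl-dateformat-fractional-second-digits / ...-day-period
    const parts = new Intl.DateTimeFormat('en', {
      hour: 'numeric', minute: 'numeric', second: 'numeric',
      fractionalSecondDigits: 3,        // "fractionalSecondDigits"
      dayPeriod: 'short',
    }).formatToParts(new Date());
    // parts may contain an entry like { type: 'fractionalSecond', value: '123' }.
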
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index 98d5715411..85ef1f4d83 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -156,7 +156,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_address) {
if (Heap::ShouldZapGarbage()) {
for (Address address = committed_region_address;
address < committed_region_size; address += kSystemPointerSize) {
- Memory<Address>(address) = static_cast<Address>(kZapValue);
+ base::Memory<Address>(address) = static_cast<Address>(kZapValue);
}
}
}
diff --git a/deps/v8/src/init/setup-isolate-deserialize.cc b/deps/v8/src/init/setup-isolate-deserialize.cc
index 8a73ff0c8a..ff0268d3c8 100644
--- a/deps/v8/src/init/setup-isolate-deserialize.cc
+++ b/deps/v8/src/init/setup-isolate-deserialize.cc
@@ -7,7 +7,6 @@
#include "src/base/logging.h"
#include "src/execution/isolate.h"
#include "src/interpreter/interpreter.h"
-#include "src/objects/objects-inl.h"
#include "src/utils/ostreams.h"
namespace v8 {
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 863940ef4b..d39e12c733 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -32,7 +32,7 @@ action("protocol_compatibility") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "$_inspector_protocol/check_protocol_compatibility.py"
inputs = [
- "js_protocol.pdl",
+ v8_inspector_js_protocol,
]
_stamp = "$target_gen_dir/js_protocol.stamp"
outputs = [
@@ -41,7 +41,7 @@ action("protocol_compatibility") {
args = [
"--stamp",
rebase_path(_stamp, root_build_dir),
- rebase_path("js_protocol.pdl", root_build_dir),
+ rebase_path(v8_inspector_js_protocol, root_build_dir),
]
}
@@ -53,10 +53,10 @@ inspector_protocol_generate("protocol_generated_sources") {
inspector_protocol_dir = _inspector_protocol
out_dir = target_gen_dir
- config_file = "inspector_protocol_config.json"
+ config_file = v8_path_prefix + "/src/inspector/inspector_protocol_config.json"
inputs = [
- "js_protocol.pdl",
- "inspector_protocol_config.json",
+ v8_inspector_js_protocol,
+ config_file,
]
outputs = _protocol_generated
}
@@ -65,7 +65,9 @@ config("inspector_config") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
configs = [ "../../:internal_config" ]
- include_dirs = [ "../../include" ]
+ include_dirs = [
+ "../../include",
+ ]
}
v8_header_set("inspector_test_headers") {
@@ -96,6 +98,7 @@ v8_source_set("inspector") {
":inspector_string_conversions",
"../..:v8_version",
"../../third_party/inspector_protocol:encoding",
+ "../../third_party/inspector_protocol:bindings",
]
public_deps = [
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 5122d5d997..e5fa06fd54 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -5,11 +5,12 @@ include_rules = [
"+src/base/compiler-specific.h",
"+src/base/logging.h",
"+src/base/macros.h",
+ "+src/base/memory.h",
"+src/base/platform/platform.h",
"+src/base/platform/mutex.h",
"+src/base/safe_conversions.h",
+ "+src/base/template-utils.h",
"+src/base/v8-fallthrough.h",
- "+src/common/v8memory.h",
"+src/numbers/conversions.h",
"+src/inspector",
"+src/tracing",
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index 55f8ac7875..a979205084 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
alph@chromium.org
caseq@chromium.org
dgozman@chromium.org
@@ -7,12 +5,6 @@ kozyatinskiy@chromium.org
pfeldman@chromium.org
yangguo@chromium.org
-# Changes to remote debugging protocol require devtools review to
-# ensure backwards compatibility and commitment to maintain.
-per-file js_protocol.pdl=set noparent
-per-file js_protocol.pdl=dgozman@chromium.org
-per-file js_protocol.pdl=pfeldman@chromium.org
-
per-file PRESUBMIT.py=file://INFRA_OWNERS
# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index e660a61aeb..1edd559e4e 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -284,7 +284,7 @@ Response InjectedScript::getProperties(
int sessionId = m_sessionId;
v8::TryCatch tryCatch(isolate);
- *properties = Array<PropertyDescriptor>::create();
+ *properties = v8::base::make_unique<Array<PropertyDescriptor>>();
std::vector<PropertyMirror> mirrors;
PropertyAccumulator accumulator(&mirrors);
if (!ValueMirror::getProperties(context, object, ownProperties,
@@ -351,7 +351,7 @@ Response InjectedScript::getProperties(
descriptor->setValue(std::move(remoteObject));
descriptor->setWasThrown(true);
}
- (*properties)->addItem(std::move(descriptor));
+ (*properties)->emplace_back(std::move(descriptor));
}
return Response::OK();
}
@@ -362,8 +362,10 @@ Response InjectedScript::getInternalAndPrivateProperties(
internalProperties,
std::unique_ptr<protocol::Array<PrivatePropertyDescriptor>>*
privateProperties) {
- *internalProperties = protocol::Array<InternalPropertyDescriptor>::create();
- *privateProperties = protocol::Array<PrivatePropertyDescriptor>::create();
+ *internalProperties =
+ v8::base::make_unique<Array<InternalPropertyDescriptor>>();
+ *privateProperties =
+ v8::base::make_unique<Array<PrivatePropertyDescriptor>>();
if (!value->IsObject()) return Response::OK();
@@ -384,10 +386,10 @@ Response InjectedScript::getInternalAndPrivateProperties(
groupName, remoteObject.get());
if (!response.isSuccess()) return response;
(*internalProperties)
- ->addItem(InternalPropertyDescriptor::create()
- .setName(internalProperty.name)
- .setValue(std::move(remoteObject))
- .build());
+ ->emplace_back(InternalPropertyDescriptor::create()
+ .setName(internalProperty.name)
+ .setValue(std::move(remoteObject))
+ .build());
}
std::vector<PrivatePropertyMirror> privatePropertyWrappers =
ValueMirror::getPrivateProperties(m_context->context(), value_obj);
@@ -401,10 +403,10 @@ Response InjectedScript::getInternalAndPrivateProperties(
groupName, remoteObject.get());
if (!response.isSuccess()) return response;
(*privateProperties)
- ->addItem(PrivatePropertyDescriptor::create()
- .setName(privateProperty.name)
- .setValue(std::move(remoteObject))
- .build());
+ ->emplace_back(PrivatePropertyDescriptor::create()
+ .setName(privateProperty.name)
+ .setValue(std::move(remoteObject))
+ .build());
}
return Response::OK();
}
@@ -487,7 +489,6 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
&limit, &limit, &preview);
if (!preview) return nullptr;
- Array<PropertyPreview>* columns = preview->getProperties();
std::unordered_set<String16> selectedColumns;
v8::Local<v8::Array> v8Columns;
if (maybeColumns.ToLocal(&v8Columns)) {
@@ -500,18 +501,17 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
}
}
if (!selectedColumns.empty()) {
- for (size_t i = 0; i < columns->length(); ++i) {
- ObjectPreview* columnPreview = columns->get(i)->getValuePreview(nullptr);
+ for (const std::unique_ptr<PropertyPreview>& column :
+ *preview->getProperties()) {
+ ObjectPreview* columnPreview = column->getValuePreview(nullptr);
if (!columnPreview) continue;
- std::unique_ptr<Array<PropertyPreview>> filtered =
- Array<PropertyPreview>::create();
- Array<PropertyPreview>* columns = columnPreview->getProperties();
- for (size_t j = 0; j < columns->length(); ++j) {
- PropertyPreview* property = columns->get(j);
+ auto filtered = v8::base::make_unique<Array<PropertyPreview>>();
+ for (const std::unique_ptr<PropertyPreview>& property :
+ *columnPreview->getProperties()) {
if (selectedColumns.find(property->getName()) !=
selectedColumns.end()) {
- filtered->addItem(property->clone());
+ filtered->emplace_back(property->clone());
}
}
columnPreview->setProperties(std::move(filtered));
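
Note: the InjectedScript::getProperties changes above swap the protocol::Array addItem() API for std::vector-style emplace_back(), but the DevTools wire format is unchanged. For orientation, a hedged sketch of a Runtime.getProperties exchange; field names follow the Runtime domain definitions visible in the (removed) js_protocol-1.2.json further down in this patch, and the objectId value is purely illustrative.

    // Request from a DevTools client (illustrative objectId).
    const request = {
      id: 1,
      method: 'Runtime.getProperties',
      params: { objectId: '<remote-object-id>', ownProperties: true },
    };

    // Successful response: an array of PropertyDescriptor entries, plus the
    // optional internalProperties/privateProperties arrays that
    // getInternalAndPrivateProperties() above now builds with make_unique.
    const response = {
      id: 1,
      result: {
        result: [
          {
            name: 'x',
            value: { type: 'number', value: 1, description: '1' },
            writable: true, configurable: true, enumerable: true, isOwn: true,
          },
        ],
        internalProperties: [],
      },
    };
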
diff --git a/deps/v8/src/inspector/inspector_protocol_config.json b/deps/v8/src/inspector/inspector_protocol_config.json
index c4aa29ce99..684940c885 100644
--- a/deps/v8/src/inspector/inspector_protocol_config.json
+++ b/deps/v8/src/inspector/inspector_protocol_config.json
@@ -1,6 +1,6 @@
{
"protocol": {
- "path": "js_protocol.pdl",
+ "path": "../../include/js_protocol.pdl",
"package": "src/inspector/protocol",
"output": "protocol",
"namespace": ["v8_inspector", "protocol"],
@@ -44,5 +44,13 @@
"package": "src/inspector/protocol",
"output": "protocol",
"string_header": "src/inspector/string-util.h"
+ },
+
+ "encoding_lib": {
+ "namespace": "v8_inspector_protocol_encoding"
+ },
+
+ "bindings_lib": {
+ "namespace": "v8_inspector_protocol_bindings"
}
}
diff --git a/deps/v8/src/inspector/js_protocol-1.2.json b/deps/v8/src/inspector/js_protocol-1.2.json
deleted file mode 100644
index aff6806222..0000000000
--- a/deps/v8/src/inspector/js_protocol-1.2.json
+++ /dev/null
@@ -1,997 +0,0 @@
-{
- "version": { "major": "1", "minor": "2" },
- "domains": [
- {
- "domain": "Schema",
- "description": "Provides information about the protocol schema.",
- "types": [
- {
- "id": "Domain",
- "type": "object",
- "description": "Description of the protocol domain.",
- "exported": true,
- "properties": [
- { "name": "name", "type": "string", "description": "Domain name." },
- { "name": "version", "type": "string", "description": "Domain version." }
- ]
- }
- ],
- "commands": [
- {
- "name": "getDomains",
- "description": "Returns supported domains.",
- "handlers": ["browser", "renderer"],
- "returns": [
- { "name": "domains", "type": "array", "items": { "$ref": "Domain" }, "description": "List of supported domains." }
- ]
- }
- ]
- },
- {
- "domain": "Runtime",
- "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. Evaluation results are returned as mirror object that expose object type, string representation and unique identifier that can be used for further object reference. Original objects are maintained in memory unless they are either explicitly released or are released along with the other objects in their object group.",
- "types": [
- {
- "id": "ScriptId",
- "type": "string",
- "description": "Unique script identifier."
- },
- {
- "id": "RemoteObjectId",
- "type": "string",
- "description": "Unique object identifier."
- },
- {
- "id": "UnserializableValue",
- "type": "string",
- "enum": ["Infinity", "NaN", "-Infinity", "-0"],
- "description": "Primitive value which cannot be JSON-stringified."
- },
- {
- "id": "RemoteObject",
- "type": "object",
- "description": "Mirror object referencing original JavaScript object.",
- "exported": true,
- "properties": [
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
- { "name": "className", "type": "string", "optional": true, "description": "Object class (constructor) name. Specified for <code>object</code> type values only." },
- { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested)." },
- { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified does not have <code>value</code>, but gets this property." },
- { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Unique object identifier (for non-primitive values)." },
- { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "experimental": true },
- { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "experimental": true}
- ]
- },
- {
- "id": "CustomPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "header", "type": "string"},
- { "name": "hasBody", "type": "boolean"},
- { "name": "formatterObjectId", "$ref": "RemoteObjectId"},
- { "name": "bindRemoteObjectFunctionId", "$ref": "RemoteObjectId" },
- { "name": "configObjectId", "$ref": "RemoteObjectId", "optional": true }
- ]
- },
- {
- "id": "ObjectPreview",
- "type": "object",
- "experimental": true,
- "description": "Object containing abbreviated remote object value.",
- "properties": [
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
- { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
- { "name": "overflow", "type": "boolean", "description": "True iff some of the properties or entries of the original object did not fit." },
- { "name": "properties", "type": "array", "items": { "$ref": "PropertyPreview" }, "description": "List of the properties." },
- { "name": "entries", "type": "array", "items": { "$ref": "EntryPreview" }, "optional": true, "description": "List of the entries. Specified for <code>map</code> and <code>set</code> subtype values only." }
- ]
- },
- {
- "id": "PropertyPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "name", "type": "string", "description": "Property name." },
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol", "accessor"], "description": "Object type. Accessor means that the property itself is an accessor property." },
- { "name": "value", "type": "string", "optional": true, "description": "User-friendly property value string." },
- { "name": "valuePreview", "$ref": "ObjectPreview", "optional": true, "description": "Nested value preview." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." }
- ]
- },
- {
- "id": "EntryPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "key", "$ref": "ObjectPreview", "optional": true, "description": "Preview of the key. Specified for map-like collection entries." },
- { "name": "value", "$ref": "ObjectPreview", "description": "Preview of the value." }
- ]
- },
- {
- "id": "PropertyDescriptor",
- "type": "object",
- "description": "Object property descriptor.",
- "properties": [
- { "name": "name", "type": "string", "description": "Property name or symbol description." },
- { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." },
- { "name": "writable", "type": "boolean", "optional": true, "description": "True if the value associated with the property may be changed (data descriptors only)." },
- { "name": "get", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a getter for the property, or <code>undefined</code> if there is no getter (accessor descriptors only)." },
- { "name": "set", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a setter for the property, or <code>undefined</code> if there is no setter (accessor descriptors only)." },
- { "name": "configurable", "type": "boolean", "description": "True if the type of this property descriptor may be changed and if the property may be deleted from the corresponding object." },
- { "name": "enumerable", "type": "boolean", "description": "True if this property shows up during enumeration of the properties on the corresponding object." },
- { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
- { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object." },
- { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type." }
- ]
- },
- {
- "id": "InternalPropertyDescriptor",
- "type": "object",
- "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
- "properties": [
- { "name": "name", "type": "string", "description": "Conventional property name." },
- { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." }
- ]
- },
- {
- "id": "CallArgument",
- "type": "object",
- "description": "Represents function call argument. Either remote object id <code>objectId</code>, primitive <code>value</code>, unserializable primitive value or neither of (for undefined) them should be specified.",
- "properties": [
- { "name": "value", "type": "any", "optional": true, "description": "Primitive value." },
- { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." }
- ]
- },
- {
- "id": "ExecutionContextId",
- "type": "integer",
- "description": "Id of an execution context."
- },
- {
- "id": "ExecutionContextDescription",
- "type": "object",
- "description": "Description of an isolated world.",
- "properties": [
- { "name": "id", "$ref": "ExecutionContextId", "description": "Unique id of the execution context. It can be used to specify in which execution context script evaluation should be performed." },
- { "name": "origin", "type": "string", "description": "Execution context origin." },
- { "name": "name", "type": "string", "description": "Human readable name describing given context." },
- { "name": "auxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." }
- ]
- },
- {
- "id": "ExceptionDetails",
- "type": "object",
- "description": "Detailed information about exception (or error) that was thrown during script compilation or execution.",
- "properties": [
- { "name": "exceptionId", "type": "integer", "description": "Exception id." },
- { "name": "text", "type": "string", "description": "Exception text, which should be used together with exception object when available." },
- { "name": "lineNumber", "type": "integer", "description": "Line number of the exception location (0-based)." },
- { "name": "columnNumber", "type": "integer", "description": "Column number of the exception location (0-based)." },
- { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Script ID of the exception location." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the exception location, to be used when the script was not reported." },
- { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." },
- { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object if available." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
- ]
- },
- {
- "id": "Timestamp",
- "type": "number",
- "description": "Number of milliseconds since epoch."
- },
- {
- "id": "CallFrame",
- "type": "object",
- "description": "Stack entry for runtime errors and assertions.",
- "properties": [
- { "name": "functionName", "type": "string", "description": "JavaScript function name." },
- { "name": "scriptId", "$ref": "ScriptId", "description": "JavaScript script id." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "lineNumber", "type": "integer", "description": "JavaScript script line number (0-based)." },
- { "name": "columnNumber", "type": "integer", "description": "JavaScript script column number (0-based)." }
- ]
- },
- {
- "id": "StackTrace",
- "type": "object",
- "description": "Call frames for assertions or error messages.",
- "exported": true,
- "properties": [
- { "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
- { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
- ]
- }
- ],
- "commands": [
- {
- "name": "evaluate",
- "async": true,
- "parameters": [
- { "name": "expression", "type": "string", "description": "Expression to evaluate." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
- { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Evaluation result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Evaluates expression on global object."
- },
- {
- "name": "awaitPromise",
- "async": true,
- "parameters": [
- { "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Promise result. Will contain rejected value if promise was rejected." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details if stack strace is available."}
- ],
- "description": "Add handler to promise with given promise object id."
- },
- {
- "name": "callFunctionOn",
- "async": true,
- "parameters": [
- { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to call function on." },
- { "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
- { "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "experimental": true, "description": "Whether execution should be treated as initiated by user in the UI." },
- { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Call result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Calls function with given declaration on the given object. Object group of the result is inherited from the target object."
- },
- {
- "name": "getProperties",
- "parameters": [
- { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to return properties for." },
- { "name": "ownProperties", "optional": true, "type": "boolean", "description": "If true, returns properties belonging only to the element itself, not to its prototype chain." },
- { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "experimental": true },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the results." }
- ],
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "PropertyDescriptor" }, "description": "Object properties." },
- { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself)." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Returns properties of a given object. Object group of the result is inherited from the target object."
- },
- {
- "name": "releaseObject",
- "parameters": [
- { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to release." }
- ],
- "description": "Releases remote object with given id."
- },
- {
- "name": "releaseObjectGroup",
- "parameters": [
- { "name": "objectGroup", "type": "string", "description": "Symbolic object group name." }
- ],
- "description": "Releases all remote objects that belong to a given group."
- },
- {
- "name": "runIfWaitingForDebugger",
- "description": "Tells inspected instance to run if it was waiting for debugger to attach."
- },
- {
- "name": "enable",
- "description": "Enables reporting of execution contexts creation by means of <code>executionContextCreated</code> event. When the reporting gets enabled the event will be sent immediately for each existing execution context."
- },
- {
- "name": "disable",
- "description": "Disables reporting of execution contexts creation."
- },
- {
- "name": "discardConsoleEntries",
- "description": "Discards collected exceptions and console API calls."
- },
- {
- "name": "setCustomObjectFormatterEnabled",
- "parameters": [
- {
- "name": "enabled",
- "type": "boolean"
- }
- ],
- "experimental": true
- },
- {
- "name": "compileScript",
- "parameters": [
- { "name": "expression", "type": "string", "description": "Expression to compile." },
- { "name": "sourceURL", "type": "string", "description": "Source url to be set for the script." },
- { "name": "persistScript", "type": "boolean", "description": "Specifies whether the compiled script should be persisted." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." }
- ],
- "returns": [
- { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Id of the script." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Compiles expression."
- },
- {
- "name": "runScript",
- "async": true,
- "parameters": [
- { "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." },
- { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Run result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Runs script with given id in a given context."
- }
- ],
- "events": [
- {
- "name": "executionContextCreated",
- "parameters": [
- { "name": "context", "$ref": "ExecutionContextDescription", "description": "A newly created execution contex." }
- ],
- "description": "Issued when new execution context is created."
- },
- {
- "name": "executionContextDestroyed",
- "parameters": [
- { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Id of the destroyed context" }
- ],
- "description": "Issued when execution context is destroyed."
- },
- {
- "name": "executionContextsCleared",
- "description": "Issued when all executionContexts were cleared in browser"
- },
- {
- "name": "exceptionThrown",
- "description": "Issued when exception was thrown and unhandled.",
- "parameters": [
- { "name": "timestamp", "$ref": "Timestamp", "description": "Timestamp of the exception." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails" }
- ]
- },
- {
- "name": "exceptionRevoked",
- "description": "Issued when unhandled exception was revoked.",
- "parameters": [
- { "name": "reason", "type": "string", "description": "Reason describing why exception was revoked." },
- { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionUnhandled</code>." }
- ]
- },
- {
- "name": "consoleAPICalled",
- "description": "Issued when console API was called.",
- "parameters": [
- { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd"], "description": "Type of the call." },
- { "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
- { "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
- { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." }
- ]
- },
- {
- "name": "inspectRequested",
- "description": "Issued when object should be inspected (for example, as a result of inspect() command line API call).",
- "parameters": [
- { "name": "object", "$ref": "RemoteObject" },
- { "name": "hints", "type": "object" }
- ]
- }
- ]
- },
- {
- "domain": "Debugger",
- "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing breakpoints, stepping through execution, exploring stack traces, etc.",
- "dependencies": ["Runtime"],
- "types": [
- {
- "id": "BreakpointId",
- "type": "string",
- "description": "Breakpoint identifier."
- },
- {
- "id": "CallFrameId",
- "type": "string",
- "description": "Call frame identifier."
- },
- {
- "id": "Location",
- "type": "object",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
- { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." }
- ],
- "description": "Location in the source code."
- },
- {
- "id": "ScriptPosition",
- "experimental": true,
- "type": "object",
- "properties": [
- { "name": "lineNumber", "type": "integer" },
- { "name": "columnNumber", "type": "integer" }
- ],
- "description": "Location in the source code."
- },
- {
- "id": "CallFrame",
- "type": "object",
- "properties": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
- { "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
- { "name": "functionLocation", "$ref": "Location", "optional": true, "experimental": true, "description": "Location in the source code." },
- { "name": "location", "$ref": "Location", "description": "Location in the source code." },
- { "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
- { "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
- { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "description": "The value being returned, if the function is at return point." }
- ],
- "description": "JavaScript call frame. Array of call frames form the call stack."
- },
- {
- "id": "Scope",
- "type": "object",
- "properties": [
- { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script"], "description": "Scope type." },
- { "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
- { "name": "name", "type": "string", "optional": true },
- { "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
- { "name": "endLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope ends" }
- ],
- "description": "Scope description."
- },
- {
- "id": "SearchMatch",
- "type": "object",
- "description": "Search match for resource.",
- "exported": true,
- "properties": [
- { "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
- { "name": "lineContent", "type": "string", "description": "Line with match content." }
- ],
- "experimental": true
- }
- ],
- "commands": [
- {
- "name": "enable",
- "description": "Enables debugger for the given page. Clients should not assume that the debugging has been enabled until the result for this command is received."
- },
- {
- "name": "disable",
- "description": "Disables debugger for given page."
- },
- {
- "name": "setBreakpointsActive",
- "parameters": [
- { "name": "active", "type": "boolean", "description": "New value for breakpoints active state." }
- ],
- "description": "Activates / deactivates all breakpoints on the page."
- },
- {
- "name": "setSkipAllPauses",
- "parameters": [
- { "name": "skip", "type": "boolean", "description": "New value for skip pauses state." }
- ],
- "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc)."
- },
- {
- "name": "setBreakpointByUrl",
- "parameters": [
- { "name": "lineNumber", "type": "integer", "description": "Line number to set breakpoint at." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the resources to set breakpoint on." },
- { "name": "urlRegex", "type": "string", "optional": true, "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either <code>url</code> or <code>urlRegex</code> must be specified." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Offset in the line to set breakpoint at." },
- { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
- ],
- "returns": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
- { "name": "locations", "type": "array", "items": { "$ref": "Location" }, "description": "List of the locations this breakpoint resolved into upon addition." }
- ],
- "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this command is issued, all existing parsed scripts will have breakpoints resolved and returned in <code>locations</code> property. Further matching script parsing will result in subsequent <code>breakpointResolved</code> events issued. This logical breakpoint will survive page reloads."
- },
- {
- "name": "setBreakpoint",
- "parameters": [
- { "name": "location", "$ref": "Location", "description": "Location to set breakpoint in." },
- { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
- ],
- "returns": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
- { "name": "actualLocation", "$ref": "Location", "description": "Location this breakpoint resolved into." }
- ],
- "description": "Sets JavaScript breakpoint at a given location."
- },
- {
- "name": "removeBreakpoint",
- "parameters": [
- { "name": "breakpointId", "$ref": "BreakpointId" }
- ],
- "description": "Removes JavaScript breakpoint."
- },
- {
- "name": "continueToLocation",
- "parameters": [
- { "name": "location", "$ref": "Location", "description": "Location to continue to." }
- ],
- "description": "Continues execution until specific location is reached."
- },
- {
- "name": "stepOver",
- "description": "Steps over the statement."
- },
- {
- "name": "stepInto",
- "description": "Steps into the function call."
- },
- {
- "name": "stepOut",
- "description": "Steps out of the function call."
- },
- {
- "name": "pause",
- "description": "Stops on the next JavaScript statement."
- },
- {
- "name": "resume",
- "description": "Resumes JavaScript execution."
- },
- {
- "name": "searchInContent",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to search in." },
- { "name": "query", "type": "string", "description": "String to search for." },
- { "name": "caseSensitive", "type": "boolean", "optional": true, "description": "If true, search is case sensitive." },
- { "name": "isRegex", "type": "boolean", "optional": true, "description": "If true, treats string parameter as regex." }
- ],
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
- ],
- "experimental": true,
- "description": "Searches for given string in script content."
- },
- {
- "name": "setScriptSource",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to edit." },
- { "name": "scriptSource", "type": "string", "description": "New content of the script." },
- { "name": "dryRun", "type": "boolean", "optional": true, "description": " If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code." }
- ],
- "returns": [
- { "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
- { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes." },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
- { "name": "exceptionDetails", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Exception details if any." }
- ],
- "description": "Edits JavaScript source live."
- },
- {
- "name": "restartFrame",
- "parameters": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." }
- ],
- "returns": [
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
- ],
- "description": "Restarts particular call frame from the beginning."
- },
- {
- "name": "getScriptSource",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to get source for." }
- ],
- "returns": [
- { "name": "scriptSource", "type": "string", "description": "Script source." }
- ],
- "description": "Returns source for the script with given id."
- },
- {
- "name": "setPauseOnExceptions",
- "parameters": [
- { "name": "state", "type": "string", "enum": ["none", "uncaught", "all"], "description": "Pause on exceptions mode." }
- ],
- "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or no exceptions. Initial pause on exceptions state is <code>none</code>."
- },
- {
- "name": "evaluateOnCallFrame",
- "parameters": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." },
- { "name": "expression", "type": "string", "description": "Expression to evaluate." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "String object group name to put result into (allows rapid releasing resulting object handles using <code>releaseObjectGroup</code>)." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." }
- ],
- "returns": [
- { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
- { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Evaluates expression on a given call frame."
- },
- {
- "name": "setVariableValue",
- "parameters": [
- { "name": "scopeNumber", "type": "integer", "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually." },
- { "name": "variableName", "type": "string", "description": "Variable name." },
- { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New variable value." },
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Id of callframe that holds variable." }
- ],
- "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
- },
- {
- "name": "setAsyncCallStackDepth",
- "parameters": [
- { "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
- ],
- "description": "Enables or disables async call stacks tracking."
- },
- {
- "name": "setBlackboxPatterns",
- "parameters": [
- { "name": "patterns", "type": "array", "items": { "type": "string" }, "description": "Array of regexps that will be used to check script url for blackbox state." }
- ],
- "experimental": true,
- "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in scripts with url matching one of the patterns. VM will try to leave blackboxed script by performing 'step in' several times, finally resorting to 'step out' if unsuccessful."
- },
- {
- "name": "setBlackboxedRanges",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script." },
- { "name": "positions", "type": "array", "items": { "$ref": "ScriptPosition" } }
- ],
- "experimental": true,
- "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted."
- }
- ],
- "events": [
- {
- "name": "scriptParsed",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
- { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
- { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
- { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
- { "name": "endLine", "type": "integer", "description": "Last line of the script." },
- { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
- { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
- { "name": "hash", "type": "string", "description": "Content hash of the script."},
- { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
- { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
- { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
- ],
- "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
- },
- {
- "name": "scriptFailedToParse",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
- { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
- { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
- { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
- { "name": "endLine", "type": "integer", "description": "Last line of the script." },
- { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
- { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
- { "name": "hash", "type": "string", "description": "Content hash of the script."},
- { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
- { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
- ],
- "description": "Fired when virtual machine fails to parse the script."
- },
- {
- "name": "breakpointResolved",
- "parameters": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Breakpoint unique identifier." },
- { "name": "location", "$ref": "Location", "description": "Actual breakpoint location." }
- ],
- "description": "Fired when breakpoint is resolved to an actual script and location."
- },
- {
- "name": "paused",
- "parameters": [
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
- { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "other" ], "description": "Pause reason.", "exported": true },
- { "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
- { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
- ],
- "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
- },
- {
- "name": "resumed",
- "description": "Fired when the virtual machine resumed execution."
- }
- ]
- },
- {
- "domain": "Console",
- "description": "This domain is deprecated - use Runtime or Log instead.",
- "dependencies": ["Runtime"],
- "deprecated": true,
- "types": [
- {
- "id": "ConsoleMessage",
- "type": "object",
- "description": "Console message.",
- "properties": [
- { "name": "source", "type": "string", "enum": ["xml", "javascript", "network", "console-api", "storage", "appcache", "rendering", "security", "other", "deprecation", "worker"], "description": "Message source." },
- { "name": "level", "type": "string", "enum": ["log", "warning", "error", "debug", "info"], "description": "Message severity." },
- { "name": "text", "type": "string", "description": "Message text." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the message origin." },
- { "name": "line", "type": "integer", "optional": true, "description": "Line number in the resource that generated this message (1-based)." },
- { "name": "column", "type": "integer", "optional": true, "description": "Column number in the resource that generated this message (1-based)." }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable",
- "description": "Enables console domain, sends the messages collected so far to the client by means of the <code>messageAdded</code> notification."
- },
- {
- "name": "disable",
- "description": "Disables console domain, prevents further console messages from being reported to the client."
- },
- {
- "name": "clearMessages",
- "description": "Does nothing."
- }
- ],
- "events": [
- {
- "name": "messageAdded",
- "parameters": [
- { "name": "message", "$ref": "ConsoleMessage", "description": "Console message that has been added." }
- ],
- "description": "Issued when new console message is added."
- }
- ]
- },
- {
- "domain": "Profiler",
- "dependencies": ["Runtime", "Debugger"],
- "types": [
- {
- "id": "ProfileNode",
- "type": "object",
- "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
- "properties": [
- { "name": "id", "type": "integer", "description": "Unique id of the node." },
- { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
- { "name": "hitCount", "type": "integer", "optional": true, "experimental": true, "description": "Number of samples where this node was on top of the call stack." },
- { "name": "children", "type": "array", "items": { "type": "integer" }, "optional": true, "description": "Child node ids." },
-                { "name": "deoptReason", "type": "string", "optional": true, "description": "The reason why the function was not optimized. The function may be deoptimized or marked as don't optimize."},
- { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "experimental": true, "description": "An array of source position ticks." }
- ]
- },
- {
- "id": "Profile",
- "type": "object",
- "description": "Profile.",
- "properties": [
- { "name": "nodes", "type": "array", "items": { "$ref": "ProfileNode" }, "description": "The list of profile nodes. First item is the root node." },
- { "name": "startTime", "type": "number", "description": "Profiling start timestamp in microseconds." },
- { "name": "endTime", "type": "number", "description": "Profiling end timestamp in microseconds." },
-                { "name": "samples", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Ids of the top nodes for each sample." },
- { "name": "timeDeltas", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the profile startTime." }
- ]
- },
- {
- "id": "PositionTickInfo",
- "type": "object",
- "experimental": true,
- "description": "Specifies a number of samples attributed to a certain source position.",
- "properties": [
- { "name": "line", "type": "integer", "description": "Source line number (1-based)." },
- { "name": "ticks", "type": "integer", "description": "Number of samples attributed to the source line." }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable"
- },
- {
- "name": "disable"
- },
- {
- "name": "setSamplingInterval",
- "parameters": [
- { "name": "interval", "type": "integer", "description": "New sampling interval in microseconds." }
- ],
- "description": "Changes CPU profiler sampling interval. Must be called before CPU profiles recording started."
- },
- {
- "name": "start"
- },
- {
- "name": "stop",
- "returns": [
- { "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
- ]
- }
- ],
- "events": [
- {
- "name": "consoleProfileStarted",
- "parameters": [
- { "name": "id", "type": "string" },
- { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
- { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
- ],
-            "description": "Sent when new profile recording is started using console.profile() call."
- },
- {
- "name": "consoleProfileFinished",
- "parameters": [
- { "name": "id", "type": "string" },
- { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profileEnd()." },
- { "name": "profile", "$ref": "Profile" },
- { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
- ]
- }
- ]
- },
- {
- "domain": "HeapProfiler",
- "dependencies": ["Runtime"],
- "experimental": true,
- "types": [
- {
- "id": "HeapSnapshotObjectId",
- "type": "string",
- "description": "Heap snapshot object id."
- },
- {
- "id": "SamplingHeapProfileNode",
- "type": "object",
- "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
- "properties": [
- { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
- { "name": "selfSize", "type": "number", "description": "Allocations size in bytes for the node excluding children." },
- { "name": "children", "type": "array", "items": { "$ref": "SamplingHeapProfileNode" }, "description": "Child nodes." }
- ]
- },
- {
- "id": "SamplingHeapProfile",
- "type": "object",
- "description": "Profile.",
- "properties": [
- { "name": "head", "$ref": "SamplingHeapProfileNode" }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable"
- },
- {
- "name": "disable"
- },
- {
- "name": "startTrackingHeapObjects",
- "parameters": [
- { "name": "trackAllocations", "type": "boolean", "optional": true }
- ]
- },
- {
- "name": "stopTrackingHeapObjects",
- "parameters": [
- { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped." }
- ]
- },
- {
- "name": "takeHeapSnapshot",
- "parameters": [
- { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken." }
- ]
- },
- {
- "name": "collectGarbage"
- },
- {
- "name": "getObjectByHeapObjectId",
- "parameters": [
- { "name": "objectId", "$ref": "HeapSnapshotObjectId" },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." }
- ],
- "returns": [
- { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Evaluation result." }
- ]
- },
- {
- "name": "addInspectedHeapObject",
- "parameters": [
- { "name": "heapObjectId", "$ref": "HeapSnapshotObjectId", "description": "Heap snapshot object id to be accessible by means of $x command line API." }
- ],
-            "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details on $x functions)."
- },
- {
- "name": "getHeapObjectId",
- "parameters": [
- { "name": "objectId", "$ref": "Runtime.RemoteObjectId", "description": "Identifier of the object to get heap object id for." }
- ],
- "returns": [
- { "name": "heapSnapshotObjectId", "$ref": "HeapSnapshotObjectId", "description": "Id of the heap snapshot object corresponding to the passed remote object id." }
- ]
- },
- {
- "name": "startSampling",
- "parameters": [
- { "name": "samplingInterval", "type": "number", "optional": true, "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes." }
- ]
- },
- {
- "name": "stopSampling",
- "returns": [
- { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Recorded sampling heap profile." }
- ]
- }
- ],
- "events": [
- {
- "name": "addHeapSnapshotChunk",
- "parameters": [
- { "name": "chunk", "type": "string" }
- ]
- },
- {
- "name": "resetProfiles"
- },
- {
- "name": "reportHeapSnapshotProgress",
- "parameters": [
- { "name": "done", "type": "integer" },
- { "name": "total", "type": "integer" },
- { "name": "finished", "type": "boolean", "optional": true }
- ]
- },
- {
- "name": "lastSeenObjectId",
-            "description": "If heap objects tracking has been started then backend regularly sends a current value for last seen object id and corresponding timestamp. If there were changes in the heap since last event then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
- "parameters": [
- { "name": "lastSeenObjectId", "type": "integer" },
- { "name": "timestamp", "type": "number" }
- ]
- },
- {
- "name": "heapStatsUpdate",
-            "description": "If heap objects tracking has been started then backend may send an update for one or more fragments.",
- "parameters": [
- { "name": "statsUpdate", "type": "array", "items": { "type": "integer" }, "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment index, the second integer is a total count of objects for the fragment, the third integer is a total size of the objects for the fragment."}
- ]
- }
- ]
- }]
-}
diff --git a/deps/v8/src/inspector/js_protocol-1.3.json b/deps/v8/src/inspector/js_protocol-1.3.json
deleted file mode 100644
index ea573d11a6..0000000000
--- a/deps/v8/src/inspector/js_protocol-1.3.json
+++ /dev/null
@@ -1,1205 +0,0 @@
-{
- "version": { "major": "1", "minor": "3" },
- "domains": [
- {
- "domain": "Schema",
- "description": "This domain is deprecated.",
- "deprecated": true,
- "types": [
- {
- "id": "Domain",
- "type": "object",
- "description": "Description of the protocol domain.",
- "properties": [
- { "name": "name", "type": "string", "description": "Domain name." },
- { "name": "version", "type": "string", "description": "Domain version." }
- ]
- }
- ],
- "commands": [
- {
- "name": "getDomains",
- "description": "Returns supported domains.",
- "handlers": ["browser", "renderer"],
- "returns": [
- { "name": "domains", "type": "array", "items": { "$ref": "Domain" }, "description": "List of supported domains." }
- ]
- }
- ]
- },
- {
- "domain": "Runtime",
- "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. Evaluation results are returned as mirror object that expose object type, string representation and unique identifier that can be used for further object reference. Original objects are maintained in memory unless they are either explicitly released or are released along with the other objects in their object group.",
- "types": [
- {
- "id": "ScriptId",
- "type": "string",
- "description": "Unique script identifier."
- },
- {
- "id": "RemoteObjectId",
- "type": "string",
- "description": "Unique object identifier."
- },
- {
- "id": "UnserializableValue",
- "type": "string",
- "enum": ["Infinity", "NaN", "-Infinity", "-0"],
- "description": "Primitive value which cannot be JSON-stringified."
- },
- {
- "id": "RemoteObject",
- "type": "object",
- "description": "Mirror object referencing original JavaScript object.",
- "properties": [
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
- { "name": "className", "type": "string", "optional": true, "description": "Object class (constructor) name. Specified for <code>object</code> type values only." },
- { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested)." },
- { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified does not have <code>value</code>, but gets this property." },
- { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Unique object identifier (for non-primitive values)." },
- { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "experimental": true },
- { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "experimental": true}
- ]
- },
- {
- "id": "CustomPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "header", "type": "string"},
- { "name": "hasBody", "type": "boolean"},
- { "name": "formatterObjectId", "$ref": "RemoteObjectId"},
- { "name": "bindRemoteObjectFunctionId", "$ref": "RemoteObjectId" },
- { "name": "configObjectId", "$ref": "RemoteObjectId", "optional": true }
- ]
- },
- {
- "id": "ObjectPreview",
- "type": "object",
- "experimental": true,
- "description": "Object containing abbreviated remote object value.",
- "properties": [
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
- { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
- { "name": "overflow", "type": "boolean", "description": "True iff some of the properties or entries of the original object did not fit." },
- { "name": "properties", "type": "array", "items": { "$ref": "PropertyPreview" }, "description": "List of the properties." },
- { "name": "entries", "type": "array", "items": { "$ref": "EntryPreview" }, "optional": true, "description": "List of the entries. Specified for <code>map</code> and <code>set</code> subtype values only." }
- ]
- },
- {
- "id": "PropertyPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "name", "type": "string", "description": "Property name." },
- { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol", "accessor"], "description": "Object type. Accessor means that the property itself is an accessor property." },
- { "name": "value", "type": "string", "optional": true, "description": "User-friendly property value string." },
- { "name": "valuePreview", "$ref": "ObjectPreview", "optional": true, "description": "Nested value preview." },
- { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "weakmap", "weakset", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." }
- ]
- },
- {
- "id": "EntryPreview",
- "type": "object",
- "experimental": true,
- "properties": [
- { "name": "key", "$ref": "ObjectPreview", "optional": true, "description": "Preview of the key. Specified for map-like collection entries." },
- { "name": "value", "$ref": "ObjectPreview", "description": "Preview of the value." }
- ]
- },
- {
- "id": "PropertyDescriptor",
- "type": "object",
- "description": "Object property descriptor.",
- "properties": [
- { "name": "name", "type": "string", "description": "Property name or symbol description." },
- { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." },
- { "name": "writable", "type": "boolean", "optional": true, "description": "True if the value associated with the property may be changed (data descriptors only)." },
- { "name": "get", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a getter for the property, or <code>undefined</code> if there is no getter (accessor descriptors only)." },
- { "name": "set", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a setter for the property, or <code>undefined</code> if there is no setter (accessor descriptors only)." },
- { "name": "configurable", "type": "boolean", "description": "True if the type of this property descriptor may be changed and if the property may be deleted from the corresponding object." },
- { "name": "enumerable", "type": "boolean", "description": "True if this property shows up during enumeration of the properties on the corresponding object." },
- { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
-                { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned by the object." },
- { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type." }
- ]
- },
- {
- "id": "InternalPropertyDescriptor",
- "type": "object",
- "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
- "properties": [
- { "name": "name", "type": "string", "description": "Conventional property name." },
- { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." }
- ]
- },
- {
- "id": "CallArgument",
- "type": "object",
-            "description": "Represents function call argument. Either remote object id <code>objectId</code>, primitive <code>value</code>, unserializable primitive value or none of them (for undefined) should be specified.",
- "properties": [
- { "name": "value", "type": "any", "optional": true, "description": "Primitive value or serializable javascript object." },
- { "name": "unserializableValue", "$ref": "UnserializableValue", "optional": true, "description": "Primitive value which can not be JSON-stringified." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." }
- ]
- },
- {
- "id": "ExecutionContextId",
- "type": "integer",
- "description": "Id of an execution context."
- },
- {
- "id": "ExecutionContextDescription",
- "type": "object",
- "description": "Description of an isolated world.",
- "properties": [
- { "name": "id", "$ref": "ExecutionContextId", "description": "Unique id of the execution context. It can be used to specify in which execution context script evaluation should be performed." },
- { "name": "origin", "type": "string", "description": "Execution context origin." },
- { "name": "name", "type": "string", "description": "Human readable name describing given context." },
- { "name": "auxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." }
- ]
- },
- {
- "id": "ExceptionDetails",
- "type": "object",
- "description": "Detailed information about exception (or error) that was thrown during script compilation or execution.",
- "properties": [
- { "name": "exceptionId", "type": "integer", "description": "Exception id." },
- { "name": "text", "type": "string", "description": "Exception text, which should be used together with exception object when available." },
- { "name": "lineNumber", "type": "integer", "description": "Line number of the exception location (0-based)." },
- { "name": "columnNumber", "type": "integer", "description": "Column number of the exception location (0-based)." },
- { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Script ID of the exception location." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the exception location, to be used when the script was not reported." },
- { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." },
- { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object if available." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
- ]
- },
- {
- "id": "Timestamp",
- "type": "number",
- "description": "Number of milliseconds since epoch."
- },
- {
- "id": "CallFrame",
- "type": "object",
- "description": "Stack entry for runtime errors and assertions.",
- "properties": [
- { "name": "functionName", "type": "string", "description": "JavaScript function name." },
- { "name": "scriptId", "$ref": "ScriptId", "description": "JavaScript script id." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "lineNumber", "type": "integer", "description": "JavaScript script line number (0-based)." },
- { "name": "columnNumber", "type": "integer", "description": "JavaScript script column number (0-based)." }
- ]
- },
- {
- "id": "StackTrace",
- "type": "object",
- "description": "Call frames for assertions or error messages.",
- "properties": [
- { "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
-                { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Array of call frames." },
- { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." },
- { "name": "parentId", "$ref": "StackTraceId", "optional": true, "experimental": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
- ]
- },
- {
- "id": "UniqueDebuggerId",
- "type": "string",
- "description": "Unique identifier of current debugger.",
- "experimental": true
- },
- {
- "id": "StackTraceId",
- "type": "object",
-            "description": "If <code>debuggerId</code> is set, the stack trace comes from another debugger and can be resolved there. This allows tracking cross-debugger calls. See <code>Runtime.StackTrace</code> and <code>Debugger.paused</code> for usages.",
- "properties": [
- { "name": "id", "type": "string" },
- { "name": "debuggerId", "$ref": "UniqueDebuggerId", "optional": true }
- ],
- "experimental": true
- }
- ],
- "commands": [
- {
- "name": "evaluate",
- "parameters": [
- { "name": "expression", "type": "string", "description": "Expression to evaluate." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "description": "Whether execution should be treated as initiated by user in the UI." },
- { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Evaluation result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Evaluates expression on global object."
- },
- {
- "name": "awaitPromise",
- "parameters": [
- { "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Promise result. Will contain rejected value if promise was rejected." },
-                { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details if stack trace is available."}
- ],
- "description": "Add handler to promise with given promise object id."
- },
- {
- "name": "callFunctionOn",
- "parameters": [
- { "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
- { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Identifier of the object to call function on. Either objectId or executionContextId should be specified." },
- { "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "userGesture", "type": "boolean", "optional": true, "description": "Whether execution should be treated as initiated by user in the UI." },
- { "name": "awaitPromise", "type": "boolean", "optional":true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Call result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Calls function with given declaration on the given object. Object group of the result is inherited from the target object."
- },
- {
- "name": "getProperties",
- "parameters": [
- { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to return properties for." },
- { "name": "ownProperties", "optional": true, "type": "boolean", "description": "If true, returns properties belonging only to the element itself, not to its prototype chain." },
- { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "experimental": true },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the results." }
- ],
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "PropertyDescriptor" }, "description": "Object properties." },
- { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself)." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Returns properties of a given object. Object group of the result is inherited from the target object."
- },
- {
- "name": "releaseObject",
- "parameters": [
- { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to release." }
- ],
- "description": "Releases remote object with given id."
- },
- {
- "name": "releaseObjectGroup",
- "parameters": [
- { "name": "objectGroup", "type": "string", "description": "Symbolic object group name." }
- ],
- "description": "Releases all remote objects that belong to a given group."
- },
- {
- "name": "runIfWaitingForDebugger",
- "description": "Tells inspected instance to run if it was waiting for debugger to attach."
- },
- {
- "name": "enable",
- "description": "Enables reporting of execution contexts creation by means of <code>executionContextCreated</code> event. When the reporting gets enabled the event will be sent immediately for each existing execution context."
- },
- {
- "name": "disable",
- "description": "Disables reporting of execution contexts creation."
- },
- {
- "name": "discardConsoleEntries",
- "description": "Discards collected exceptions and console API calls."
- },
- {
- "name": "setCustomObjectFormatterEnabled",
- "parameters": [
- {
- "name": "enabled",
- "type": "boolean"
- }
- ],
- "experimental": true
- },
- {
- "name": "compileScript",
- "parameters": [
- { "name": "expression", "type": "string", "description": "Expression to compile." },
- { "name": "sourceURL", "type": "string", "description": "Source url to be set for the script." },
- { "name": "persistScript", "type": "boolean", "description": "Specifies whether the compiled script should be persisted." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." }
- ],
- "returns": [
- { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Id of the script." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Compiles expression."
- },
- {
- "name": "runScript",
- "parameters": [
- { "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." },
- { "name": "awaitPromise", "type": "boolean", "optional": true, "description": "Whether execution should <code>await</code> for resulting value and return once awaited promise is resolved." }
- ],
- "returns": [
- { "name": "result", "$ref": "RemoteObject", "description": "Run result." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Runs script with given id in a given context."
- },
- {
- "name": "queryObjects",
- "parameters": [
- { "name": "prototypeObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the prototype to return objects for." }
- ],
- "returns": [
- { "name": "objects", "$ref": "RemoteObject", "description": "Array with objects." }
- ]
- },
- {
- "name": "globalLexicalScopeNames",
- "parameters": [
- { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to lookup global scope variables." }
- ],
- "returns": [
- { "name": "names", "type": "array", "items": { "type": "string" } }
- ],
- "description": "Returns all let, const and class variables from global scope."
- }
- ],
- "events": [
- {
- "name": "executionContextCreated",
- "parameters": [
- { "name": "context", "$ref": "ExecutionContextDescription", "description": "A newly created execution context." }
- ],
- "description": "Issued when new execution context is created."
- },
- {
- "name": "executionContextDestroyed",
- "parameters": [
- { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Id of the destroyed context" }
- ],
- "description": "Issued when execution context is destroyed."
- },
- {
- "name": "executionContextsCleared",
- "description": "Issued when all executionContexts were cleared in browser"
- },
- {
- "name": "exceptionThrown",
- "description": "Issued when exception was thrown and unhandled.",
- "parameters": [
- { "name": "timestamp", "$ref": "Timestamp", "description": "Timestamp of the exception." },
- { "name": "exceptionDetails", "$ref": "ExceptionDetails" }
- ]
- },
- {
- "name": "exceptionRevoked",
- "description": "Issued when unhandled exception was revoked.",
- "parameters": [
- { "name": "reason", "type": "string", "description": "Reason describing why exception was revoked." },
- { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionThrown</code>." }
- ]
- },
- {
- "name": "consoleAPICalled",
- "description": "Issued when console API was called.",
- "parameters": [
- { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd", "count", "timeEnd"], "description": "Type of the call." },
- { "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
- { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
- { "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
- { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." },
- { "name": "context", "type": "string", "optional": true, "experimental": true, "description": "Console context descriptor for calls on non-default console context (not console.*): 'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call on named context." }
- ]
- },
- {
- "name": "inspectRequested",
- "description": "Issued when object should be inspected (for example, as a result of inspect() command line API call).",
- "parameters": [
- { "name": "object", "$ref": "RemoteObject" },
- { "name": "hints", "type": "object" }
- ]
- }
- ]
- },
- {
- "domain": "Debugger",
- "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing breakpoints, stepping through execution, exploring stack traces, etc.",
- "dependencies": ["Runtime"],
- "types": [
- {
- "id": "BreakpointId",
- "type": "string",
- "description": "Breakpoint identifier."
- },
- {
- "id": "CallFrameId",
- "type": "string",
- "description": "Call frame identifier."
- },
- {
- "id": "Location",
- "type": "object",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
- { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." }
- ],
- "description": "Location in the source code."
- },
- {
- "id": "ScriptPosition",
- "experimental": true,
- "type": "object",
- "properties": [
- { "name": "lineNumber", "type": "integer" },
- { "name": "columnNumber", "type": "integer" }
- ],
- "description": "Location in the source code."
- },
- {
- "id": "CallFrame",
- "type": "object",
- "properties": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
- { "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
- { "name": "functionLocation", "$ref": "Location", "optional": true, "description": "Location in the source code." },
- { "name": "location", "$ref": "Location", "description": "Location in the source code." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
- { "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
- { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "description": "The value being returned, if the function is at return point." }
- ],
- "description": "JavaScript call frame. Array of call frames form the call stack."
- },
- {
- "id": "Scope",
- "type": "object",
- "properties": [
- { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script", "eval", "module"], "description": "Scope type." },
- { "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
- { "name": "name", "type": "string", "optional": true },
- { "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
- { "name": "endLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope ends" }
- ],
- "description": "Scope description."
- },
- {
- "id": "SearchMatch",
- "type": "object",
- "description": "Search match for resource.",
- "properties": [
- { "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
- { "name": "lineContent", "type": "string", "description": "Line with match content." }
- ]
- },
- {
- "id": "BreakLocation",
- "type": "object",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
- { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." },
- { "name": "type", "type": "string", "enum": [ "debuggerStatement", "call", "return" ], "optional": true }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable",
- "returns": [
- { "name": "debuggerId", "$ref": "Runtime.UniqueDebuggerId", "experimental": true, "description": "Unique identifier of the debugger." }
- ],
- "description": "Enables debugger for the given page. Clients should not assume that the debugging has been enabled until the result for this command is received."
- },
- {
- "name": "disable",
- "description": "Disables debugger for given page."
- },
- {
- "name": "setBreakpointsActive",
- "parameters": [
- { "name": "active", "type": "boolean", "description": "New value for breakpoints active state." }
- ],
- "description": "Activates / deactivates all breakpoints on the page."
- },
- {
- "name": "setSkipAllPauses",
- "parameters": [
- { "name": "skip", "type": "boolean", "description": "New value for skip pauses state." }
- ],
- "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc)."
- },
- {
- "name": "setBreakpointByUrl",
- "parameters": [
- { "name": "lineNumber", "type": "integer", "description": "Line number to set breakpoint at." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the resources to set breakpoint on." },
- { "name": "urlRegex", "type": "string", "optional": true, "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either <code>url</code> or <code>urlRegex</code> must be specified." },
- { "name": "scriptHash", "type": "string", "optional": true, "description": "Script hash of the resources to set breakpoint on." },
- { "name": "columnNumber", "type": "integer", "optional": true, "description": "Offset in the line to set breakpoint at." },
- { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
- ],
- "returns": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
- { "name": "locations", "type": "array", "items": { "$ref": "Location" }, "description": "List of the locations this breakpoint resolved into upon addition." }
- ],
- "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this command is issued, all existing parsed scripts will have breakpoints resolved and returned in <code>locations</code> property. Further matching script parsing will result in subsequent <code>breakpointResolved</code> events issued. This logical breakpoint will survive page reloads."
- },
- {
- "name": "setBreakpoint",
- "parameters": [
- { "name": "location", "$ref": "Location", "description": "Location to set breakpoint in." },
- { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
- ],
- "returns": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
- { "name": "actualLocation", "$ref": "Location", "description": "Location this breakpoint resolved into." }
- ],
- "description": "Sets JavaScript breakpoint at a given location."
- },
- {
- "name": "removeBreakpoint",
- "parameters": [
- { "name": "breakpointId", "$ref": "BreakpointId" }
- ],
- "description": "Removes JavaScript breakpoint."
- },
- {
- "name": "getPossibleBreakpoints",
- "parameters": [
- { "name": "start", "$ref": "Location", "description": "Start of range to search possible breakpoint locations in." },
- { "name": "end", "$ref": "Location", "optional": true, "description": "End of range to search possible breakpoint locations in (excluding). When not specified, end of scripts is used as end of range." },
- { "name": "restrictToFunction", "type": "boolean", "optional": true, "description": "Only consider locations which are in the same (non-nested) function as start." }
- ],
- "returns": [
- { "name": "locations", "type": "array", "items": { "$ref": "BreakLocation" }, "description": "List of the possible breakpoint locations." }
- ],
- "description": "Returns possible locations for breakpoint. scriptId in start and end range locations should be the same."
- },
- {
- "name": "continueToLocation",
- "parameters": [
- { "name": "location", "$ref": "Location", "description": "Location to continue to." },
- { "name": "targetCallFrames", "type": "string", "enum": ["any", "current"], "optional": true }
- ],
- "description": "Continues execution until specific location is reached."
- },
- {
- "name": "pauseOnAsyncCall",
- "parameters": [
- { "name": "parentStackTraceId", "$ref": "Runtime.StackTraceId", "description": "Debugger will pause when async call with given stack trace is started." }
- ],
- "experimental": true
- },
- {
- "name": "stepOver",
- "description": "Steps over the statement."
- },
- {
- "name": "stepInto",
- "parameters": [
- { "name": "breakOnAsyncCall", "type": "boolean", "optional": true, "experimental": true, "description": "Debugger will issue additional Debugger.paused notification if any async task is scheduled before next pause." }
- ],
- "description": "Steps into the function call."
- },
- {
- "name": "stepOut",
- "description": "Steps out of the function call."
- },
- {
- "name": "pause",
- "description": "Stops on the next JavaScript statement."
- },
- {
- "name": "scheduleStepIntoAsync",
-            "description": "This method is deprecated - use Debugger.stepInto with breakOnAsyncCall and Debugger.pauseOnAsyncCall instead. Steps into next scheduled async task if any is scheduled before next pause. Returns success when async task is actually scheduled, returns error if no task was scheduled or another scheduleStepIntoAsync was called.",
- "experimental": true
- },
- {
- "name": "resume",
- "description": "Resumes JavaScript execution."
- },
- {
- "name": "getStackTrace",
- "parameters": [
- { "name": "stackTraceId", "$ref": "Runtime.StackTraceId" }
- ],
- "returns": [
- { "name": "stackTrace", "$ref": "Runtime.StackTrace" }
- ],
- "description": "Returns stack trace with given <code>stackTraceId</code>.",
- "experimental": true
- },
- {
- "name": "searchInContent",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to search in." },
- { "name": "query", "type": "string", "description": "String to search for." },
- { "name": "caseSensitive", "type": "boolean", "optional": true, "description": "If true, search is case sensitive." },
- { "name": "isRegex", "type": "boolean", "optional": true, "description": "If true, treats string parameter as regex." }
- ],
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
- ],
- "description": "Searches for given string in script content."
- },
- {
- "name": "setScriptSource",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to edit." },
- { "name": "scriptSource", "type": "string", "description": "New content of the script." },
-                { "name": "dryRun", "type": "boolean", "optional": true, "description": "If true, the change will not actually be applied. Dry run may be used to get result description without actually modifying the code." }
- ],
- "returns": [
- { "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
- { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes." },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
- { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." },
- { "name": "exceptionDetails", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Exception details if any." }
- ],
- "description": "Edits JavaScript source live."
- },
- {
- "name": "restartFrame",
- "parameters": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." }
- ],
- "returns": [
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
- { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." }
- ],
- "description": "Restarts particular call frame from the beginning."
- },
- {
- "name": "getScriptSource",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to get source for." }
- ],
- "returns": [
- { "name": "scriptSource", "type": "string", "description": "Script source." }
- ],
- "description": "Returns source for the script with given id."
- },
- {
- "name": "setPauseOnExceptions",
- "parameters": [
- { "name": "state", "type": "string", "enum": ["none", "uncaught", "all"], "description": "Pause on exceptions mode." }
- ],
- "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or no exceptions. Initial pause on exceptions state is <code>none</code>."
- },
- {
- "name": "evaluateOnCallFrame",
- "parameters": [
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." },
- { "name": "expression", "type": "string", "description": "Expression to evaluate." },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "String object group name to put result into (allows rapid releasing resulting object handles using <code>releaseObjectGroup</code>)." },
- { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
- { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
- { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
- { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
- { "name": "throwOnSideEffect", "type": "boolean", "optional": true, "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation." }
- ],
- "returns": [
- { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
- { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "description": "Exception details."}
- ],
- "description": "Evaluates expression on a given call frame."
- },
- {
- "name": "setVariableValue",
- "parameters": [
- { "name": "scopeNumber", "type": "integer", "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually." },
- { "name": "variableName", "type": "string", "description": "Variable name." },
- { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New variable value." },
- { "name": "callFrameId", "$ref": "CallFrameId", "description": "Id of callframe that holds variable." }
- ],
- "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
- },
- {
- "name": "setReturnValue",
- "parameters": [
- { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New return value." }
- ],
- "experimental": true,
- "description": "Changes return value in top frame. Available only at return break position."
- },
- {
- "name": "setAsyncCallStackDepth",
- "parameters": [
- { "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
- ],
- "description": "Enables or disables async call stacks tracking."
- },
- {
- "name": "setBlackboxPatterns",
- "parameters": [
- { "name": "patterns", "type": "array", "items": { "type": "string" }, "description": "Array of regexps that will be used to check script url for blackbox state." }
- ],
- "experimental": true,
-            "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in scripts with url matching one of the patterns. VM will try to leave blackboxed scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful."
- },
- {
- "name": "setBlackboxedRanges",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script." },
- { "name": "positions", "type": "array", "items": { "$ref": "ScriptPosition" } }
- ],
- "experimental": true,
-            "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try to leave blackboxed scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted."
- }
- ],
- "events": [
- {
- "name": "scriptParsed",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
- { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
- { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
- { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
- { "name": "endLine", "type": "integer", "description": "Last line of the script." },
- { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
- { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
- { "name": "hash", "type": "string", "description": "Content hash of the script."},
- { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
- { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
- { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL." },
- { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module." },
- { "name": "length", "type": "integer", "optional": true, "description": "This script length." },
- { "name": "stackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "JavaScript top stack frame of where the script parsed event was triggered if available.", "experimental": true }
- ],
- "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
- },
- {
- "name": "scriptFailedToParse",
- "parameters": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
- { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
- { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
- { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
- { "name": "endLine", "type": "integer", "description": "Last line of the script." },
- { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
- { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context." },
- { "name": "hash", "type": "string", "description": "Content hash of the script."},
- { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
- { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
- { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL." },
- { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module." },
- { "name": "length", "type": "integer", "optional": true, "description": "This script length." },
- { "name": "stackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "JavaScript top stack frame of where the script parsed event was triggered if available.", "experimental": true }
- ],
- "description": "Fired when virtual machine fails to parse the script."
- },
- {
- "name": "breakpointResolved",
- "parameters": [
- { "name": "breakpointId", "$ref": "BreakpointId", "description": "Breakpoint unique identifier." },
- { "name": "location", "$ref": "Location", "description": "Actual breakpoint location." }
- ],
- "description": "Fired when breakpoint is resolved to an actual script and location."
- },
- {
- "name": "paused",
- "parameters": [
- { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
- { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "OOM", "other", "ambiguous" ], "description": "Pause reason." },
- { "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
- { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
- { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." },
- { "name": "asyncStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Async stack trace, if any." },
- { "name": "asyncCallStackTraceId", "$ref": "Runtime.StackTraceId", "optional": true, "experimental": true, "description": "Just scheduled async call will have this stack trace as parent stack during async execution. This field is available only after <code>Debugger.stepInto</code> call with <code>breakOnAsynCall</code> flag." }
- ],
- "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
- },
- {
- "name": "resumed",
- "description": "Fired when the virtual machine resumed execution."
- }
- ]
- },
- {
- "domain": "Console",
- "description": "This domain is deprecated - use Runtime or Log instead.",
- "dependencies": ["Runtime"],
- "deprecated": true,
- "types": [
- {
- "id": "ConsoleMessage",
- "type": "object",
- "description": "Console message.",
- "properties": [
- { "name": "source", "type": "string", "enum": ["xml", "javascript", "network", "console-api", "storage", "appcache", "rendering", "security", "other", "deprecation", "worker"], "description": "Message source." },
- { "name": "level", "type": "string", "enum": ["log", "warning", "error", "debug", "info"], "description": "Message severity." },
- { "name": "text", "type": "string", "description": "Message text." },
- { "name": "url", "type": "string", "optional": true, "description": "URL of the message origin." },
- { "name": "line", "type": "integer", "optional": true, "description": "Line number in the resource that generated this message (1-based)." },
- { "name": "column", "type": "integer", "optional": true, "description": "Column number in the resource that generated this message (1-based)." }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable",
- "description": "Enables console domain, sends the messages collected so far to the client by means of the <code>messageAdded</code> notification."
- },
- {
- "name": "disable",
- "description": "Disables console domain, prevents further console messages from being reported to the client."
- },
- {
- "name": "clearMessages",
- "description": "Does nothing."
- }
- ],
- "events": [
- {
- "name": "messageAdded",
- "parameters": [
- { "name": "message", "$ref": "ConsoleMessage", "description": "Console message that has been added." }
- ],
- "description": "Issued when new console message is added."
- }
- ]
- },
- {
- "domain": "Profiler",
- "dependencies": ["Runtime", "Debugger"],
- "types": [
- {
- "id": "ProfileNode",
- "type": "object",
- "description": "Profile node. Holds callsite information, execution statistics and child nodes.",
- "properties": [
- { "name": "id", "type": "integer", "description": "Unique id of the node." },
- { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
- { "name": "hitCount", "type": "integer", "optional": true, "description": "Number of samples where this node was on top of the call stack." },
- { "name": "children", "type": "array", "items": { "type": "integer" }, "optional": true, "description": "Child node ids." },
- { "name": "deoptReason", "type": "string", "optional": true, "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
- { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "optional": true, "description": "An array of source position ticks." }
- ]
- },
- {
- "id": "Profile",
- "type": "object",
- "description": "Profile.",
- "properties": [
- { "name": "nodes", "type": "array", "items": { "$ref": "ProfileNode" }, "description": "The list of profile nodes. First item is the root node." },
- { "name": "startTime", "type": "number", "description": "Profiling start timestamp in microseconds." },
- { "name": "endTime", "type": "number", "description": "Profiling end timestamp in microseconds." },
- { "name": "samples", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Ids of samples top nodes." },
- { "name": "timeDeltas", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Time intervals between adjacent samples in microseconds. The first delta is relative to the profile startTime." }
- ]
- },
- {
- "id": "PositionTickInfo",
- "type": "object",
- "description": "Specifies a number of samples attributed to a certain source position.",
- "properties": [
- { "name": "line", "type": "integer", "description": "Source line number (1-based)." },
- { "name": "ticks", "type": "integer", "description": "Number of samples attributed to the source line." }
- ]
- },
- { "id": "CoverageRange",
- "type": "object",
- "description": "Coverage data for a source range.",
- "properties": [
- { "name": "startOffset", "type": "integer", "description": "JavaScript script source offset for the range start." },
- { "name": "endOffset", "type": "integer", "description": "JavaScript script source offset for the range end." },
- { "name": "count", "type": "integer", "description": "Collected execution count of the source range." }
- ]
- },
- { "id": "FunctionCoverage",
- "type": "object",
- "description": "Coverage data for a JavaScript function.",
- "properties": [
- { "name": "functionName", "type": "string", "description": "JavaScript function name." },
- { "name": "ranges", "type": "array", "items": { "$ref": "CoverageRange" }, "description": "Source ranges inside the function with coverage data." },
- { "name": "isBlockCoverage", "type": "boolean", "description": "Whether coverage data for this function has block granularity." }
- ]
- },
- {
- "id": "ScriptCoverage",
- "type": "object",
- "description": "Coverage data for a JavaScript script.",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "functions", "type": "array", "items": { "$ref": "FunctionCoverage" }, "description": "Functions contained in the script that has coverage data." }
- ]
- },
- { "id": "TypeObject",
- "type": "object",
- "description": "Describes a type collected during runtime.",
- "properties": [
- { "name": "name", "type": "string", "description": "Name of a type collected with type profiling." }
- ],
- "experimental": true
- },
- { "id": "TypeProfileEntry",
- "type": "object",
- "description": "Source offset and types for a parameter or return value.",
- "properties": [
- { "name": "offset", "type": "integer", "description": "Source offset of the parameter or end of function for return values." },
- { "name": "types", "type": "array", "items": {"$ref": "TypeObject"}, "description": "The types for this parameter or return value."}
- ],
- "experimental": true
- },
- {
- "id": "ScriptTypeProfile",
- "type": "object",
- "description": "Type profile data collected during runtime for a JavaScript script.",
- "properties": [
- { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
- { "name": "url", "type": "string", "description": "JavaScript script name or url." },
- { "name": "entries", "type": "array", "items": { "$ref": "TypeProfileEntry" }, "description": "Type profile entries for parameters and return values of the functions in the script." }
- ],
- "experimental": true
- }
- ],
- "commands": [
- {
- "name": "enable"
- },
- {
- "name": "disable"
- },
- {
- "name": "setSamplingInterval",
- "parameters": [
- { "name": "interval", "type": "integer", "description": "New sampling interval in microseconds." }
- ],
- "description": "Changes CPU profiler sampling interval. Must be called before CPU profiles recording started."
- },
- {
- "name": "start"
- },
- {
- "name": "stop",
- "returns": [
- { "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
- ]
- },
- {
- "name": "startPreciseCoverage",
- "parameters": [
- { "name": "callCount", "type": "boolean", "optional": true, "description": "Collect accurate call counts beyond simple 'covered' or 'not covered'." },
- { "name": "detailed", "type": "boolean", "optional": true, "description": "Collect block-based coverage." }
- ],
- "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code coverage may be incomplete. Enabling prevents running optimized code and resets execution counters."
- },
- {
- "name": "stopPreciseCoverage",
- "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows executing optimized code."
- },
- {
- "name": "takePreciseCoverage",
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
- ],
- "description": "Collect coverage data for the current isolate, and resets execution counters. Precise code coverage needs to have started."
- },
- {
- "name": "getBestEffortCoverage",
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
- ],
- "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to garbage collection."
- },
- {
- "name": "startTypeProfile",
- "description": "Enable type profile.",
- "experimental": true
- },
- {
- "name": "stopTypeProfile",
- "description": "Disable type profile. Disabling releases type profile data collected so far.",
- "experimental": true
- },
- {
- "name": "takeTypeProfile",
- "returns": [
- { "name": "result", "type": "array", "items": { "$ref": "ScriptTypeProfile" }, "description": "Type profile for all scripts since startTypeProfile() was turned on." }
- ],
- "description": "Collect type profile.",
- "experimental": true
- }
- ],
- "events": [
- {
- "name": "consoleProfileStarted",
- "parameters": [
- { "name": "id", "type": "string" },
- { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
- { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
- ],
- "description": "Sent when new profile recording is started using console.profile() call."
- },
- {
- "name": "consoleProfileFinished",
- "parameters": [
- { "name": "id", "type": "string" },
- { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profileEnd()." },
- { "name": "profile", "$ref": "Profile" },
- { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
- ]
- }
- ]
- },
- {
- "domain": "HeapProfiler",
- "dependencies": ["Runtime"],
- "experimental": true,
- "types": [
- {
- "id": "HeapSnapshotObjectId",
- "type": "string",
- "description": "Heap snapshot object id."
- },
- {
- "id": "SamplingHeapProfileNode",
- "type": "object",
- "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
- "properties": [
- { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
- { "name": "selfSize", "type": "number", "description": "Allocations size in bytes for the node excluding children." },
- { "name": "children", "type": "array", "items": { "$ref": "SamplingHeapProfileNode" }, "description": "Child nodes." }
- ]
- },
- {
- "id": "SamplingHeapProfile",
- "type": "object",
- "description": "Profile.",
- "properties": [
- { "name": "head", "$ref": "SamplingHeapProfileNode" }
- ]
- }
- ],
- "commands": [
- {
- "name": "enable"
- },
- {
- "name": "disable"
- },
- {
- "name": "startTrackingHeapObjects",
- "parameters": [
- { "name": "trackAllocations", "type": "boolean", "optional": true }
- ]
- },
- {
- "name": "stopTrackingHeapObjects",
- "parameters": [
- { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped." }
- ]
- },
- {
- "name": "takeHeapSnapshot",
- "parameters": [
- { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken." }
- ]
- },
- {
- "name": "collectGarbage"
- },
- {
- "name": "getObjectByHeapObjectId",
- "parameters": [
- { "name": "objectId", "$ref": "HeapSnapshotObjectId" },
- { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." }
- ],
- "returns": [
- { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Evaluation result." }
- ]
- },
- {
- "name": "addInspectedHeapObject",
- "parameters": [
- { "name": "heapObjectId", "$ref": "HeapSnapshotObjectId", "description": "Heap snapshot object id to be accessible by means of $x command line API." }
- ],
- "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details $x functions)."
- },
- {
- "name": "getHeapObjectId",
- "parameters": [
- { "name": "objectId", "$ref": "Runtime.RemoteObjectId", "description": "Identifier of the object to get heap object id for." }
- ],
- "returns": [
- { "name": "heapSnapshotObjectId", "$ref": "HeapSnapshotObjectId", "description": "Id of the heap snapshot object corresponding to the passed remote object id." }
- ]
- },
- {
- "name": "startSampling",
- "parameters": [
- { "name": "samplingInterval", "type": "number", "optional": true, "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes." }
- ]
- },
- {
- "name": "stopSampling",
- "returns": [
- { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Recorded sampling heap profile." }
- ]
- },
- {
- "name": "getSamplingProfile",
- "returns": [
- { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Return the sampling profile being collected." }
- ]
- }
- ],
- "events": [
- {
- "name": "addHeapSnapshotChunk",
- "parameters": [
- { "name": "chunk", "type": "string" }
- ]
- },
- {
- "name": "resetProfiles"
- },
- {
- "name": "reportHeapSnapshotProgress",
- "parameters": [
- { "name": "done", "type": "integer" },
- { "name": "total", "type": "integer" },
- { "name": "finished", "type": "boolean", "optional": true }
- ]
- },
- {
- "name": "lastSeenObjectId",
- "description": "If heap objects tracking has been started then backend regularly sends a current value for last seen object id and corresponding timestamp. If the were changes in the heap since last event then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
- "parameters": [
- { "name": "lastSeenObjectId", "type": "integer" },
- { "name": "timestamp", "type": "number" }
- ]
- },
- {
- "name": "heapStatsUpdate",
- "description": "If heap objects tracking has been started then backend may send update for one or more fragments",
- "parameters": [
- { "name": "statsUpdate", "type": "array", "items": { "type": "integer" }, "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment index, the second integer is a total count of objects for the fragment, the third integer is a total size of the objects for the fragment."}
- ]
- }
- ]
- }]
-}
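
The js_protocol.json and js_protocol.pdl files removed in this part of the diff define the V8 inspector protocol: clients drive the Debugger, Profiler, HeapProfiler, Console and Runtime domains by posting the commands declared in them over an inspector session. As a rough, illustrative sketch only (not part of this change), the Profiler coverage commands declared above can be exercised from Node.js through the built-in inspector module; the post() and collectPreciseCoverage() helper names are made up for the example, while the method names and parameters come from the schema.

'use strict';
// Illustration only: drive the Profiler domain defined in js_protocol.json
// from Node.js via the built-in inspector module (session.post(method, params, cb)).
const inspector = require('inspector');

const session = new inspector.Session();
session.connect();

// Promise wrapper around session.post (helper name is illustrative).
function post(method, params) {
  return new Promise((resolve, reject) => {
    session.post(method, params, (err, result) => (err ? reject(err) : resolve(result)));
  });
}

async function collectPreciseCoverage() {
  await post('Profiler.enable');
  // Parameters match the startPreciseCoverage command defined above.
  await post('Profiler.startPreciseCoverage', { callCount: true, detailed: true });
  // ... run the code to be measured here ...
  const { result } = await post('Profiler.takePreciseCoverage'); // array of ScriptCoverage
  await post('Profiler.stopPreciseCoverage');
  return result;
}

collectPreciseCoverage().then((coverage) => {
  console.log(JSON.stringify(coverage, null, 2));
});
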
diff --git a/deps/v8/src/inspector/js_protocol.pdl b/deps/v8/src/inspector/js_protocol.pdl
deleted file mode 100644
index c4ff51b060..0000000000
--- a/deps/v8/src/inspector/js_protocol.pdl
+++ /dev/null
@@ -1,1492 +0,0 @@
-# Copyright 2017 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-version
- major 1
- minor 3
-
-# This domain is deprecated - use Runtime or Log instead.
-deprecated domain Console
- depends on Runtime
-
- # Console message.
- type ConsoleMessage extends object
- properties
- # Message source.
- enum source
- xml
- javascript
- network
- console-api
- storage
- appcache
- rendering
- security
- other
- deprecation
- worker
- # Message severity.
- enum level
- log
- warning
- error
- debug
- info
- # Message text.
- string text
- # URL of the message origin.
- optional string url
- # Line number in the resource that generated this message (1-based).
- optional integer line
- # Column number in the resource that generated this message (1-based).
- optional integer column
-
- # Does nothing.
- command clearMessages
-
- # Disables console domain, prevents further console messages from being reported to the client.
- command disable
-
- # Enables console domain, sends the messages collected so far to the client by means of the
- # `messageAdded` notification.
- command enable
-
- # Issued when new console message is added.
- event messageAdded
- parameters
- # Console message that has been added.
- ConsoleMessage message
-
-# Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing
-# breakpoints, stepping through execution, exploring stack traces, etc.
-domain Debugger
- depends on Runtime
-
- # Breakpoint identifier.
- type BreakpointId extends string
-
- # Call frame identifier.
- type CallFrameId extends string
-
- # Location in the source code.
- type Location extends object
- properties
- # Script identifier as reported in the `Debugger.scriptParsed`.
- Runtime.ScriptId scriptId
- # Line number in the script (0-based).
- integer lineNumber
- # Column number in the script (0-based).
- optional integer columnNumber
-
- # Location in the source code.
- experimental type ScriptPosition extends object
- properties
- integer lineNumber
- integer columnNumber
-
- # JavaScript call frame. Array of call frames form the call stack.
- type CallFrame extends object
- properties
- # Call frame identifier. This identifier is only valid while the virtual machine is paused.
- CallFrameId callFrameId
- # Name of the JavaScript function called on this call frame.
- string functionName
- # Location in the source code.
- optional Location functionLocation
- # Location in the source code.
- Location location
- # JavaScript script name or url.
- string url
- # Scope chain for this call frame.
- array of Scope scopeChain
- # `this` object for this call frame.
- Runtime.RemoteObject this
- # The value being returned, if the function is at return point.
- optional Runtime.RemoteObject returnValue
-
- # Scope description.
- type Scope extends object
- properties
- # Scope type.
- enum type
- global
- local
- with
- closure
- catch
- block
- script
- eval
- module
- # Object representing the scope. For `global` and `with` scopes it represents the actual
- # object; for the rest of the scopes, it is artificial transient object enumerating scope
- # variables as its properties.
- Runtime.RemoteObject object
- optional string name
- # Location in the source code where scope starts
- optional Location startLocation
- # Location in the source code where scope ends
- optional Location endLocation
-
- # Search match for resource.
- type SearchMatch extends object
- properties
- # Line number in resource content.
- number lineNumber
- # Line with match content.
- string lineContent
-
- type BreakLocation extends object
- properties
- # Script identifier as reported in the `Debugger.scriptParsed`.
- Runtime.ScriptId scriptId
- # Line number in the script (0-based).
- integer lineNumber
- # Column number in the script (0-based).
- optional integer columnNumber
- optional enum type
- debuggerStatement
- call
- return
-
- # Continues execution until specific location is reached.
- command continueToLocation
- parameters
- # Location to continue to.
- Location location
- optional enum targetCallFrames
- any
- current
-
- # Disables debugger for given page.
- command disable
-
- # Enables debugger for the given page. Clients should not assume that the debugging has been
- # enabled until the result for this command is received.
- command enable
- parameters
- # The maximum size in bytes of collected scripts (not referenced by other heap objects)
- # the debugger can hold. Puts no limit if parameter is omitted.
- experimental optional number maxScriptsCacheSize
- returns
- # Unique identifier of the debugger.
- experimental Runtime.UniqueDebuggerId debuggerId
-
- # Evaluates expression on a given call frame.
- command evaluateOnCallFrame
- parameters
- # Call frame identifier to evaluate on.
- CallFrameId callFrameId
- # Expression to evaluate.
- string expression
- # String object group name to put result into (allows rapid releasing resulting object handles
- # using `releaseObjectGroup`).
- optional string objectGroup
- # Specifies whether command line API should be available to the evaluated expression, defaults
- # to false.
- optional boolean includeCommandLineAPI
- # In silent mode exceptions thrown during evaluation are not reported and do not pause
- # execution. Overrides `setPauseOnException` state.
- optional boolean silent
- # Whether the result is expected to be a JSON object that should be sent by value.
- optional boolean returnByValue
- # Whether preview should be generated for the result.
- experimental optional boolean generatePreview
- # Whether to throw an exception if side effect cannot be ruled out during evaluation.
- optional boolean throwOnSideEffect
- # Terminate execution after timing out (number of milliseconds).
- experimental optional Runtime.TimeDelta timeout
- returns
- # Object wrapper for the evaluation result.
- Runtime.RemoteObject result
- # Exception details.
- optional Runtime.ExceptionDetails exceptionDetails
-
- # Returns possible locations for breakpoint. scriptId in start and end range locations should be
- # the same.
- command getPossibleBreakpoints
- parameters
- # Start of range to search possible breakpoint locations in.
- Location start
- # End of range to search possible breakpoint locations in (excluding). When not specified, end
- # of scripts is used as end of range.
- optional Location end
- # Only consider locations which are in the same (non-nested) function as start.
- optional boolean restrictToFunction
- returns
- # List of the possible breakpoint locations.
- array of BreakLocation locations
-
- # Returns source for the script with given id.
- command getScriptSource
- parameters
- # Id of the script to get source for.
- Runtime.ScriptId scriptId
- returns
- # Script source.
- string scriptSource
-
- # Returns stack trace with given `stackTraceId`.
- experimental command getStackTrace
- parameters
- Runtime.StackTraceId stackTraceId
- returns
- Runtime.StackTrace stackTrace
-
- # Stops on the next JavaScript statement.
- command pause
-
- experimental command pauseOnAsyncCall
- parameters
- # Debugger will pause when async call with given stack trace is started.
- Runtime.StackTraceId parentStackTraceId
-
- # Removes JavaScript breakpoint.
- command removeBreakpoint
- parameters
- BreakpointId breakpointId
-
- # Restarts particular call frame from the beginning.
- command restartFrame
- parameters
- # Call frame identifier to evaluate on.
- CallFrameId callFrameId
- returns
- # New stack trace.
- array of CallFrame callFrames
- # Async stack trace, if any.
- optional Runtime.StackTrace asyncStackTrace
- # Async stack trace, if any.
- experimental optional Runtime.StackTraceId asyncStackTraceId
-
- # Resumes JavaScript execution.
- command resume
-
- # Searches for given string in script content.
- command searchInContent
- parameters
- # Id of the script to search in.
- Runtime.ScriptId scriptId
- # String to search for.
- string query
- # If true, search is case sensitive.
- optional boolean caseSensitive
- # If true, treats string parameter as regex.
- optional boolean isRegex
- returns
- # List of search matches.
- array of SearchMatch result
-
- # Enables or disables async call stacks tracking.
- command setAsyncCallStackDepth
- parameters
- # Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async
- # call stacks (default).
- integer maxDepth
-
- # Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in
- # scripts with url matching one of the patterns. VM will try to leave blackboxed script by
- # performing 'step in' several times, finally resorting to 'step out' if unsuccessful.
- experimental command setBlackboxPatterns
- parameters
- # Array of regexps that will be used to check script url for blackbox state.
- array of string patterns
-
- # Makes backend skip steps in the script in blackboxed ranges. VM will try to leave blackboxed
- # scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful.
- # Positions array contains positions where blackbox state is changed. First interval isn't
- # blackboxed. Array should be sorted.
- experimental command setBlackboxedRanges
- parameters
- # Id of the script.
- Runtime.ScriptId scriptId
- array of ScriptPosition positions
-
- # Sets JavaScript breakpoint at a given location.
- command setBreakpoint
- parameters
- # Location to set breakpoint in.
- Location location
- # Expression to use as a breakpoint condition. When specified, debugger will only stop on the
- # breakpoint if this expression evaluates to true.
- optional string condition
- returns
- # Id of the created breakpoint for further reference.
- BreakpointId breakpointId
- # Location this breakpoint resolved into.
- Location actualLocation
-
- # Sets instrumentation breakpoint.
- command setInstrumentationBreakpoint
- parameters
- # Instrumentation name.
- enum instrumentation
- beforeScriptExecution
- beforeScriptWithSourceMapExecution
- returns
- # Id of the created breakpoint for further reference.
- BreakpointId breakpointId
-
- # Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this
- # command is issued, all existing parsed scripts will have breakpoints resolved and returned in
- # `locations` property. Further matching script parsing will result in subsequent
- # `breakpointResolved` events issued. This logical breakpoint will survive page reloads.
- command setBreakpointByUrl
- parameters
- # Line number to set breakpoint at.
- integer lineNumber
- # URL of the resources to set breakpoint on.
- optional string url
- # Regex pattern for the URLs of the resources to set breakpoints on. Either `url` or
- # `urlRegex` must be specified.
- optional string urlRegex
- # Script hash of the resources to set breakpoint on.
- optional string scriptHash
- # Offset in the line to set breakpoint at.
- optional integer columnNumber
- # Expression to use as a breakpoint condition. When specified, debugger will only stop on the
- # breakpoint if this expression evaluates to true.
- optional string condition
- returns
- # Id of the created breakpoint for further reference.
- BreakpointId breakpointId
- # List of the locations this breakpoint resolved into upon addition.
- array of Location locations
-
- # Sets JavaScript breakpoint before each call to the given function.
- # If another function was created from the same source as a given one,
- # calling it will also trigger the breakpoint.
- experimental command setBreakpointOnFunctionCall
- parameters
- # Function object id.
- Runtime.RemoteObjectId objectId
- # Expression to use as a breakpoint condition. When specified, debugger will
- # stop on the breakpoint if this expression evaluates to true.
- optional string condition
- returns
- # Id of the created breakpoint for further reference.
- BreakpointId breakpointId
-
- # Activates / deactivates all breakpoints on the page.
- command setBreakpointsActive
- parameters
- # New value for breakpoints active state.
- boolean active
-
- # Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or
- # no exceptions. Initial pause on exceptions state is `none`.
- command setPauseOnExceptions
- parameters
- # Pause on exceptions mode.
- enum state
- none
- uncaught
- all
-
- # Changes return value in top frame. Available only at return break position.
- experimental command setReturnValue
- parameters
- # New return value.
- Runtime.CallArgument newValue
-
- # Edits JavaScript source live.
- command setScriptSource
- parameters
- # Id of the script to edit.
- Runtime.ScriptId scriptId
- # New content of the script.
- string scriptSource
- # If true the change will not actually be applied. Dry run may be used to get result
- # description without actually modifying the code.
- optional boolean dryRun
- returns
- # New stack trace in case editing has happened while VM was stopped.
- optional array of CallFrame callFrames
- # Whether current call stack was modified after applying the changes.
- optional boolean stackChanged
- # Async stack trace, if any.
- optional Runtime.StackTrace asyncStackTrace
- # Async stack trace, if any.
- experimental optional Runtime.StackTraceId asyncStackTraceId
- # Exception details if any.
- optional Runtime.ExceptionDetails exceptionDetails
-
- # Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc).
- command setSkipAllPauses
- parameters
- # New value for skip pauses state.
- boolean skip
-
- # Changes value of variable in a callframe. Object-based scopes are not supported and must be
- # mutated manually.
- command setVariableValue
- parameters
- # 0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch'
- # scope types are allowed. Other scopes could be manipulated manually.
- integer scopeNumber
- # Variable name.
- string variableName
- # New variable value.
- Runtime.CallArgument newValue
- # Id of callframe that holds variable.
- CallFrameId callFrameId
-
- # Steps into the function call.
- command stepInto
- parameters
- # Debugger will issue additional Debugger.paused notification if any async task is scheduled
- # before next pause.
- experimental optional boolean breakOnAsyncCall
-
- # Steps out of the function call.
- command stepOut
-
- # Steps over the statement.
- command stepOver
-
- # Fired when breakpoint is resolved to an actual script and location.
- event breakpointResolved
- parameters
- # Breakpoint unique identifier.
- BreakpointId breakpointId
- # Actual breakpoint location.
- Location location
-
- # Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria.
- event paused
- parameters
- # Call stack the virtual machine stopped on.
- array of CallFrame callFrames
- # Pause reason.
- enum reason
- ambiguous
- assert
- debugCommand
- DOM
- EventListener
- exception
- instrumentation
- OOM
- other
- promiseRejection
- XHR
- # Object containing break-specific auxiliary properties.
- optional object data
- # Hit breakpoints IDs
- optional array of string hitBreakpoints
- # Async stack trace, if any.
- optional Runtime.StackTrace asyncStackTrace
- # Async stack trace, if any.
- experimental optional Runtime.StackTraceId asyncStackTraceId
- # Just scheduled async call will have this stack trace as parent stack during async execution.
- # This field is available only after `Debugger.stepInto` call with `breakOnAsyncCall` flag.
- experimental optional Runtime.StackTraceId asyncCallStackTraceId
-
- # Fired when the virtual machine resumed execution.
- event resumed
-
- # Fired when virtual machine fails to parse the script.
- event scriptFailedToParse
- parameters
- # Identifier of the script parsed.
- Runtime.ScriptId scriptId
- # URL or name of the script parsed (if any).
- string url
- # Line offset of the script within the resource with given URL (for script tags).
- integer startLine
- # Column offset of the script within the resource with given URL.
- integer startColumn
- # Last line of the script.
- integer endLine
- # Length of the last line of the script.
- integer endColumn
- # Specifies script creation context.
- Runtime.ExecutionContextId executionContextId
- # Content hash of the script.
- string hash
- # Embedder-specific auxiliary data.
- optional object executionContextAuxData
- # URL of source map associated with script (if any).
- optional string sourceMapURL
- # True, if this script has sourceURL.
- optional boolean hasSourceURL
- # True, if this script is ES6 module.
- optional boolean isModule
- # This script's length.
- optional integer length
- # JavaScript top stack frame of where the script parsed event was triggered if available.
- experimental optional Runtime.StackTrace stackTrace
-
- # Fired when virtual machine parses script. This event is also fired for all known and uncollected
- # scripts upon enabling debugger.
- event scriptParsed
- parameters
- # Identifier of the script parsed.
- Runtime.ScriptId scriptId
- # URL or name of the script parsed (if any).
- string url
- # Line offset of the script within the resource with given URL (for script tags).
- integer startLine
- # Column offset of the script within the resource with given URL.
- integer startColumn
- # Last line of the script.
- integer endLine
- # Length of the last line of the script.
- integer endColumn
- # Specifies script creation context.
- Runtime.ExecutionContextId executionContextId
- # Content hash of the script.
- string hash
- # Embedder-specific auxiliary data.
- optional object executionContextAuxData
- # True, if this script is generated as a result of the live edit operation.
- experimental optional boolean isLiveEdit
- # URL of source map associated with script (if any).
- optional string sourceMapURL
- # True, if this script has sourceURL.
- optional boolean hasSourceURL
- # True, if this script is ES6 module.
- optional boolean isModule
- # This script's length.
- optional integer length
- # JavaScript top stack frame of where the script parsed event was triggered if available.
- experimental optional Runtime.StackTrace stackTrace
-
-experimental domain HeapProfiler
- depends on Runtime
-
- # Heap snapshot object id.
- type HeapSnapshotObjectId extends string
-
- # Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.
- type SamplingHeapProfileNode extends object
- properties
- # Function location.
- Runtime.CallFrame callFrame
- # Allocations size in bytes for the node excluding children.
- number selfSize
- # Node id. Ids are unique across all profiles collected between startSampling and stopSampling.
- integer id
- # Child nodes.
- array of SamplingHeapProfileNode children
-
- # A single sample from a sampling profile.
- type SamplingHeapProfileSample extends object
- properties
- # Allocation size in bytes attributed to the sample.
- number size
- # Id of the corresponding profile tree node.
- integer nodeId
- # Time-ordered sample ordinal number. It is unique across all profiles retrieved
- # between startSampling and stopSampling.
- number ordinal
-
- # Sampling profile.
- type SamplingHeapProfile extends object
- properties
- SamplingHeapProfileNode head
- array of SamplingHeapProfileSample samples
-
- # Enables console to refer to the node with given id via $x (see Command Line API for more details
- # on $x functions).
- command addInspectedHeapObject
- parameters
- # Heap snapshot object id to be accessible by means of $x command line API.
- HeapSnapshotObjectId heapObjectId
-
- command collectGarbage
-
- command disable
-
- command enable
-
- command getHeapObjectId
- parameters
- # Identifier of the object to get heap object id for.
- Runtime.RemoteObjectId objectId
- returns
- # Id of the heap snapshot object corresponding to the passed remote object id.
- HeapSnapshotObjectId heapSnapshotObjectId
-
- command getObjectByHeapObjectId
- parameters
- HeapSnapshotObjectId objectId
- # Symbolic group name that can be used to release multiple objects.
- optional string objectGroup
- returns
- # Evaluation result.
- Runtime.RemoteObject result
-
- command getSamplingProfile
- returns
- # Return the sampling profile being collected.
- SamplingHeapProfile profile
-
- command startSampling
- parameters
- # Average sample interval in bytes. Poisson distribution is used for the intervals. The
- # default value is 32768 bytes.
- optional number samplingInterval
-
- command startTrackingHeapObjects
- parameters
- optional boolean trackAllocations
-
- command stopSampling
- returns
- # Recorded sampling heap profile.
- SamplingHeapProfile profile
-
- command stopTrackingHeapObjects
- parameters
- # If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken
- # when the tracking is stopped.
- optional boolean reportProgress
-
- command takeHeapSnapshot
- parameters
- # If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken.
- optional boolean reportProgress
-
- event addHeapSnapshotChunk
- parameters
- string chunk
-
- # If heap objects tracking has been started then backend may send an update for one or more fragments
- event heapStatsUpdate
- parameters
- # An array of triplets. Each triplet describes a fragment. The first integer is the fragment
- # index, the second integer is a total count of objects for the fragment, the third integer is
- # a total size of the objects for the fragment.
- array of integer statsUpdate
-
- # If heap objects tracking has been started then backend regularly sends a current value for last
- # seen object id and corresponding timestamp. If there were changes in the heap since the last event
- # then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.
- event lastSeenObjectId
- parameters
- integer lastSeenObjectId
- number timestamp
-
- event reportHeapSnapshotProgress
- parameters
- integer done
- integer total
- optional boolean finished
-
- event resetProfiles
-
-domain Profiler
- depends on Runtime
- depends on Debugger
-
- # Profile node. Holds callsite information, execution statistics and child nodes.
- type ProfileNode extends object
- properties
- # Unique id of the node.
- integer id
- # Function location.
- Runtime.CallFrame callFrame
- # Number of samples where this node was on top of the call stack.
- optional integer hitCount
- # Child node ids.
- optional array of integer children
- # The reason the function was not optimized. The function may be deoptimized or marked as don't
- # optimize.
- optional string deoptReason
- # An array of source position ticks.
- optional array of PositionTickInfo positionTicks
-
- # Profile.
- type Profile extends object
- properties
- # The list of profile nodes. First item is the root node.
- array of ProfileNode nodes
- # Profiling start timestamp in microseconds.
- number startTime
- # Profiling end timestamp in microseconds.
- number endTime
- # Ids of samples top nodes.
- optional array of integer samples
- # Time intervals between adjacent samples in microseconds. The first delta is relative to the
- # profile startTime.
- optional array of integer timeDeltas
-
- # Specifies a number of samples attributed to a certain source position.
- type PositionTickInfo extends object
- properties
- # Source line number (1-based).
- integer line
- # Number of samples attributed to the source line.
- integer ticks
-
- # Coverage data for a source range.
- type CoverageRange extends object
- properties
- # JavaScript script source offset for the range start.
- integer startOffset
- # JavaScript script source offset for the range end.
- integer endOffset
- # Collected execution count of the source range.
- integer count
-
- # Coverage data for a JavaScript function.
- type FunctionCoverage extends object
- properties
- # JavaScript function name.
- string functionName
- # Source ranges inside the function with coverage data.
- array of CoverageRange ranges
- # Whether coverage data for this function has block granularity.
- boolean isBlockCoverage
-
- # Coverage data for a JavaScript script.
- type ScriptCoverage extends object
- properties
- # JavaScript script id.
- Runtime.ScriptId scriptId
- # JavaScript script name or url.
- string url
- # Functions contained in the script that have coverage data.
- array of FunctionCoverage functions
-
- # Describes a type collected during runtime.
- experimental type TypeObject extends object
- properties
- # Name of a type collected with type profiling.
- string name
-
- # Source offset and types for a parameter or return value.
- experimental type TypeProfileEntry extends object
- properties
- # Source offset of the parameter or end of function for return values.
- integer offset
- # The types for this parameter or return value.
- array of TypeObject types
-
- # Type profile data collected during runtime for a JavaScript script.
- experimental type ScriptTypeProfile extends object
- properties
- # JavaScript script id.
- Runtime.ScriptId scriptId
- # JavaScript script name or url.
- string url
- # Type profile entries for parameters and return values of the functions in the script.
- array of TypeProfileEntry entries
-
- command disable
-
- command enable
-
- # Collect coverage data for the current isolate. The coverage data may be incomplete due to
- # garbage collection.
- command getBestEffortCoverage
- returns
- # Coverage data for the current isolate.
- array of ScriptCoverage result
-
- # Changes CPU profiler sampling interval. Must be called before CPU profile recording is started.
- command setSamplingInterval
- parameters
- # New sampling interval in microseconds.
- integer interval
-
- command start
-
- # Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code
- # coverage may be incomplete. Enabling prevents running optimized code and resets execution
- # counters.
- command startPreciseCoverage
- parameters
- # Collect accurate call counts beyond simple 'covered' or 'not covered'.
- optional boolean callCount
- # Collect block-based coverage.
- optional boolean detailed
-
- # Enable type profile.
- experimental command startTypeProfile
-
- command stop
- returns
- # Recorded profile.
- Profile profile
-
- # Disable precise code coverage. Disabling releases unnecessary execution count records and allows
- # executing optimized code.
- command stopPreciseCoverage
-
- # Disable type profile. Disabling releases type profile data collected so far.
- experimental command stopTypeProfile
-
- # Collect coverage data for the current isolate, and reset execution counters. Precise code
- # coverage needs to have started.
- command takePreciseCoverage
- returns
- # Coverage data for the current isolate.
- array of ScriptCoverage result
-
- # Collect type profile.
- experimental command takeTypeProfile
- returns
- # Type profile for all scripts since startTypeProfile() was turned on.
- array of ScriptTypeProfile result
-
- event consoleProfileFinished
- parameters
- string id
- # Location of console.profileEnd().
- Debugger.Location location
- Profile profile
- # Profile title passed as an argument to console.profile().
- optional string title
-
- # Sent when new profile recording is started using console.profile() call.
- event consoleProfileStarted
- parameters
- string id
- # Location of console.profile().
- Debugger.Location location
- # Profile title passed as an argument to console.profile().
- optional string title
-
-# Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects.
-# Evaluation results are returned as mirror object that expose object type, string representation
-# and unique identifier that can be used for further object reference. Original objects are
-# maintained in memory unless they are either explicitly released or are released along with the
-# other objects in their object group.
-domain Runtime
-
- # Unique script identifier.
- type ScriptId extends string
-
- # Unique object identifier.
- type RemoteObjectId extends string
-
- # Primitive value which cannot be JSON-stringified. Includes values `-0`, `NaN`, `Infinity`,
- # `-Infinity`, and bigint literals.
- type UnserializableValue extends string
-
- # Mirror object referencing original JavaScript object.
- type RemoteObject extends object
- properties
- # Object type.
- enum type
- object
- function
- undefined
- string
- number
- boolean
- symbol
- bigint
- # Object subtype hint. Specified for `object` type values only.
- optional enum subtype
- array
- null
- node
- regexp
- date
- map
- set
- weakmap
- weakset
- iterator
- generator
- error
- proxy
- promise
- typedarray
- arraybuffer
- dataview
- # Object class (constructor) name. Specified for `object` type values only.
- optional string className
- # Remote object value in case of primitive values or JSON values (if it was requested).
- optional any value
- # Primitive value which can not be JSON-stringified does not have `value`, but gets this
- # property.
- optional UnserializableValue unserializableValue
- # String representation of the object.
- optional string description
- # Unique object identifier (for non-primitive values).
- optional RemoteObjectId objectId
- # Preview containing abbreviated property values. Specified for `object` type values only.
- experimental optional ObjectPreview preview
- experimental optional CustomPreview customPreview
-
- experimental type CustomPreview extends object
- properties
- # The JSON-stringified result of formatter.header(object, config) call.
- # It contains a JSON ML array that represents the RemoteObject.
- string header
- # If formatter returns true as a result of formatter.hasBody call then bodyGetterId will
- # contain RemoteObjectId for the function that returns result of formatter.body(object, config) call.
- # The result value is a JSON ML array.
- optional RemoteObjectId bodyGetterId
-
- # Object containing abbreviated remote object value.
- experimental type ObjectPreview extends object
- properties
- # Object type.
- enum type
- object
- function
- undefined
- string
- number
- boolean
- symbol
- bigint
- # Object subtype hint. Specified for `object` type values only.
- optional enum subtype
- array
- null
- node
- regexp
- date
- map
- set
- weakmap
- weakset
- iterator
- generator
- error
- # String representation of the object.
- optional string description
- # True iff some of the properties or entries of the original object did not fit.
- boolean overflow
- # List of the properties.
- array of PropertyPreview properties
- # List of the entries. Specified for `map` and `set` subtype values only.
- optional array of EntryPreview entries
-
- experimental type PropertyPreview extends object
- properties
- # Property name.
- string name
- # Object type. Accessor means that the property itself is an accessor property.
- enum type
- object
- function
- undefined
- string
- number
- boolean
- symbol
- accessor
- bigint
- # User-friendly property value string.
- optional string value
- # Nested value preview.
- optional ObjectPreview valuePreview
- # Object subtype hint. Specified for `object` type values only.
- optional enum subtype
- array
- null
- node
- regexp
- date
- map
- set
- weakmap
- weakset
- iterator
- generator
- error
-
- experimental type EntryPreview extends object
- properties
- # Preview of the key. Specified for map-like collection entries.
- optional ObjectPreview key
- # Preview of the value.
- ObjectPreview value
-
- # Object property descriptor.
- type PropertyDescriptor extends object
- properties
- # Property name or symbol description.
- string name
- # The value associated with the property.
- optional RemoteObject value
- # True if the value associated with the property may be changed (data descriptors only).
- optional boolean writable
- # A function which serves as a getter for the property, or `undefined` if there is no getter
- # (accessor descriptors only).
- optional RemoteObject get
- # A function which serves as a setter for the property, or `undefined` if there is no setter
- # (accessor descriptors only).
- optional RemoteObject set
- # True if the type of this property descriptor may be changed and if the property may be
- # deleted from the corresponding object.
- boolean configurable
- # True if this property shows up during enumeration of the properties on the corresponding
- # object.
- boolean enumerable
- # True if the result was thrown during the evaluation.
- optional boolean wasThrown
- # True if the property is owned by the object.
- optional boolean isOwn
- # Property symbol object, if the property is of the `symbol` type.
- optional RemoteObject symbol
-
- # Object internal property descriptor. This property isn't normally visible in JavaScript code.
- type InternalPropertyDescriptor extends object
- properties
- # Conventional property name.
- string name
- # The value associated with the property.
- optional RemoteObject value
-
- # Object private field descriptor.
- experimental type PrivatePropertyDescriptor extends object
- properties
- # Private property name.
- string name
- # The value associated with the private property.
- RemoteObject value
-
- # Represents function call argument. Either remote object id `objectId`, primitive `value`,
- # unserializable primitive value, or neither of them (for undefined) should be specified.
- type CallArgument extends object
- properties
- # Primitive value or serializable javascript object.
- optional any value
- # Primitive value which can not be JSON-stringified.
- optional UnserializableValue unserializableValue
- # Remote object handle.
- optional RemoteObjectId objectId
-
- # Id of an execution context.
- type ExecutionContextId extends integer
-
- # Description of an isolated world.
- type ExecutionContextDescription extends object
- properties
- # Unique id of the execution context. It can be used to specify in which execution context
- # script evaluation should be performed.
- ExecutionContextId id
- # Execution context origin.
- string origin
- # Human readable name describing given context.
- string name
- # Embedder-specific auxiliary data.
- optional object auxData
-
- # Detailed information about exception (or error) that was thrown during script compilation or
- # execution.
- type ExceptionDetails extends object
- properties
- # Exception id.
- integer exceptionId
- # Exception text, which should be used together with exception object when available.
- string text
- # Line number of the exception location (0-based).
- integer lineNumber
- # Column number of the exception location (0-based).
- integer columnNumber
- # Script ID of the exception location.
- optional ScriptId scriptId
- # URL of the exception location, to be used when the script was not reported.
- optional string url
- # JavaScript stack trace if available.
- optional StackTrace stackTrace
- # Exception object if available.
- optional RemoteObject exception
- # Identifier of the context where exception happened.
- optional ExecutionContextId executionContextId
-
- # Number of milliseconds since epoch.
- type Timestamp extends number
-
- # Number of milliseconds.
- type TimeDelta extends number
-
- # Stack entry for runtime errors and assertions.
- type CallFrame extends object
- properties
- # JavaScript function name.
- string functionName
- # JavaScript script id.
- ScriptId scriptId
- # JavaScript script name or url.
- string url
- # JavaScript script line number (0-based).
- integer lineNumber
- # JavaScript script column number (0-based).
- integer columnNumber
-
- # Call frames for assertions or error messages.
- type StackTrace extends object
- properties
- # String label of this stack trace. For async traces this may be a name of the function that
- # initiated the async call.
- optional string description
- # JavaScript call frames of the stack trace.
- array of CallFrame callFrames
- # Asynchronous JavaScript stack trace that preceded this stack, if available.
- optional StackTrace parent
- # Asynchronous JavaScript stack trace that preceded this stack, if available.
- experimental optional StackTraceId parentId
-
- # Unique identifier of current debugger.
- experimental type UniqueDebuggerId extends string
-
- # If `debuggerId` is set, the stack trace comes from another debugger and can be resolved there. This
- # allows tracking cross-debugger calls. See `Runtime.StackTrace` and `Debugger.paused` for usages.
- experimental type StackTraceId extends object
- properties
- string id
- optional UniqueDebuggerId debuggerId
-
- # Add handler to promise with given promise object id.
- command awaitPromise
- parameters
- # Identifier of the promise.
- RemoteObjectId promiseObjectId
- # Whether the result is expected to be a JSON object that should be sent by value.
- optional boolean returnByValue
- # Whether preview should be generated for the result.
- optional boolean generatePreview
- returns
- # Promise result. Will contain rejected value if promise was rejected.
- RemoteObject result
- # Exception details if a stack trace is available.
- optional ExceptionDetails exceptionDetails
-
- # Calls function with given declaration on the given object. Object group of the result is
- # inherited from the target object.
- command callFunctionOn
- parameters
- # Declaration of the function to call.
- string functionDeclaration
- # Identifier of the object to call function on. Either objectId or executionContextId should
- # be specified.
- optional RemoteObjectId objectId
- # Call arguments. All call arguments must belong to the same JavaScript world as the target
- # object.
- optional array of CallArgument arguments
- # In silent mode exceptions thrown during evaluation are not reported and do not pause
- # execution. Overrides `setPauseOnException` state.
- optional boolean silent
- # Whether the result is expected to be a JSON object which should be sent by value.
- optional boolean returnByValue
- # Whether preview should be generated for the result.
- experimental optional boolean generatePreview
- # Whether execution should be treated as initiated by user in the UI.
- optional boolean userGesture
- # Whether execution should `await` for resulting value and return once awaited promise is
- # resolved.
- optional boolean awaitPromise
- # Specifies execution context which global object will be used to call function on. Either
- # executionContextId or objectId should be specified.
- optional ExecutionContextId executionContextId
- # Symbolic group name that can be used to release multiple objects. If objectGroup is not
- # specified and objectId is, objectGroup will be inherited from object.
- optional string objectGroup
- returns
- # Call result.
- RemoteObject result
- # Exception details.
- optional ExceptionDetails exceptionDetails
-
- # Compiles expression.
- command compileScript
- parameters
- # Expression to compile.
- string expression
- # Source url to be set for the script.
- string sourceURL
- # Specifies whether the compiled script should be persisted.
- boolean persistScript
- # Specifies in which execution context to perform script run. If the parameter is omitted the
- # evaluation will be performed in the context of the inspected page.
- optional ExecutionContextId executionContextId
- returns
- # Id of the script.
- optional ScriptId scriptId
- # Exception details.
- optional ExceptionDetails exceptionDetails
-
- # Disables reporting of execution contexts creation.
- command disable
-
- # Discards collected exceptions and console API calls.
- command discardConsoleEntries
-
- # Enables reporting of execution contexts creation by means of `executionContextCreated` event.
- # When the reporting gets enabled the event will be sent immediately for each existing execution
- # context.
- command enable
-
- # Evaluates expression on global object.
- command evaluate
- parameters
- # Expression to evaluate.
- string expression
- # Symbolic group name that can be used to release multiple objects.
- optional string objectGroup
- # Determines whether Command Line API should be available during the evaluation.
- optional boolean includeCommandLineAPI
- # In silent mode exceptions thrown during evaluation are not reported and do not pause
- # execution. Overrides `setPauseOnException` state.
- optional boolean silent
- # Specifies in which execution context to perform evaluation. If the parameter is omitted the
- # evaluation will be performed in the context of the inspected page.
- optional ExecutionContextId contextId
- # Whether the result is expected to be a JSON object that should be sent by value.
- optional boolean returnByValue
- # Whether preview should be generated for the result.
- experimental optional boolean generatePreview
- # Whether execution should be treated as initiated by user in the UI.
- optional boolean userGesture
- # Whether execution should `await` for resulting value and return once awaited promise is
- # resolved.
- optional boolean awaitPromise
- # Whether to throw an exception if side effect cannot be ruled out during evaluation.
- experimental optional boolean throwOnSideEffect
- # Terminate execution after timing out (number of milliseconds).
- experimental optional TimeDelta timeout
- returns
- # Evaluation result.
- RemoteObject result
- # Exception details.
- optional ExceptionDetails exceptionDetails
-
- # Returns the isolate id.
- experimental command getIsolateId
- returns
- # The isolate id.
- string id
-
- # Returns the JavaScript heap usage.
- # It is the total usage of the corresponding isolate not scoped to a particular Runtime.
- experimental command getHeapUsage
- returns
- # Used heap size in bytes.
- number usedSize
- # Allocated heap size in bytes.
- number totalSize
-
- # Returns properties of a given object. Object group of the result is inherited from the target
- # object.
- command getProperties
- parameters
- # Identifier of the object to return properties for.
- RemoteObjectId objectId
- # If true, returns properties belonging only to the element itself, not to its prototype
- # chain.
- optional boolean ownProperties
- # If true, returns accessor properties (with getter/setter) only; internal properties are not
- # returned either.
- experimental optional boolean accessorPropertiesOnly
- # Whether preview should be generated for the results.
- experimental optional boolean generatePreview
- returns
- # Object properties.
- array of PropertyDescriptor result
- # Internal object properties (only of the element itself).
- optional array of InternalPropertyDescriptor internalProperties
- # Object private properties.
- experimental optional array of PrivatePropertyDescriptor privateProperties
- # Exception details.
- optional ExceptionDetails exceptionDetails
-
- # Returns all let, const and class variables from global scope.
- command globalLexicalScopeNames
- parameters
- # Specifies in which execution context to lookup global scope variables.
- optional ExecutionContextId executionContextId
- returns
- array of string names
-
- command queryObjects
- parameters
- # Identifier of the prototype to return objects for.
- RemoteObjectId prototypeObjectId
- # Symbolic group name that can be used to release the results.
- optional string objectGroup
- returns
- # Array with objects.
- RemoteObject objects
-
- # Releases remote object with given id.
- command releaseObject
- parameters
- # Identifier of the object to release.
- RemoteObjectId objectId
-
- # Releases all remote objects that belong to a given group.
- command releaseObjectGroup
- parameters
- # Symbolic object group name.
- string objectGroup
-
- # Tells inspected instance to run if it was waiting for debugger to attach.
- command runIfWaitingForDebugger
-
- # Runs script with given id in a given context.
- command runScript
- parameters
- # Id of the script to run.
- ScriptId scriptId
- # Specifies in which execution context to perform script run. If the parameter is omitted the
- # evaluation will be performed in the context of the inspected page.
- optional ExecutionContextId executionContextId
- # Symbolic group name that can be used to release multiple objects.
- optional string objectGroup
- # In silent mode exceptions thrown during evaluation are not reported and do not pause
- # execution. Overrides `setPauseOnException` state.
- optional boolean silent
- # Determines whether Command Line API should be available during the evaluation.
- optional boolean includeCommandLineAPI
- # Whether the result is expected to be a JSON object which should be sent by value.
- optional boolean returnByValue
- # Whether preview should be generated for the result.
- optional boolean generatePreview
- # Whether execution should `await` for resulting value and return once awaited promise is
- # resolved.
- optional boolean awaitPromise
- returns
- # Run result.
- RemoteObject result
- # Exception details.
- optional ExceptionDetails exceptionDetails
-
- # Enables or disables async call stacks tracking.
- command setAsyncCallStackDepth
- redirect Debugger
- parameters
- # Maximum depth of async call stacks. Setting to `0` will effectively disable collecting async
- # call stacks (default).
- integer maxDepth
-
- experimental command setCustomObjectFormatterEnabled
- parameters
- boolean enabled
-
- experimental command setMaxCallStackSizeToCapture
- parameters
- integer size
-
- # Terminate current or next JavaScript execution.
- # Will cancel the termination when the outer-most script execution ends.
- experimental command terminateExecution
-
- # If executionContextId is empty, adds a binding with the given name on the
- # global objects of all inspected contexts, including those created later;
- # bindings survive reloads.
- # If executionContextId is specified, adds the binding only on the global
- # object of the given execution context.
- # The binding function takes exactly one argument, which must be a string;
- # any other input makes the function throw an exception.
- # Each call of the binding function produces a Runtime.bindingCalled notification.
- experimental command addBinding
- parameters
- string name
- optional ExecutionContextId executionContextId
-
- # This method does not remove the binding function from the global object but
- # unsubscribes the current runtime agent from Runtime.bindingCalled notifications.
- experimental command removeBinding
- parameters
- string name
-
- # Notification is issued every time the binding is called.
- experimental event bindingCalled
- parameters
- string name
- string payload
- # Identifier of the context where the call was made.
- ExecutionContextId executionContextId
-
- # Issued when console API was called.
- event consoleAPICalled
- parameters
- # Type of the call.
- enum type
- log
- debug
- info
- error
- warning
- dir
- dirxml
- table
- trace
- clear
- startGroup
- startGroupCollapsed
- endGroup
- assert
- profile
- profileEnd
- count
- timeEnd
- # Call arguments.
- array of RemoteObject args
- # Identifier of the context where the call was made.
- ExecutionContextId executionContextId
- # Call timestamp.
- Timestamp timestamp
- # Stack trace captured when the call was made. The async stack chain is automatically reported for
- # the following call types: `assert`, `error`, `trace`, `warning`. For other types the async call
- # chain can be retrieved using `Debugger.getStackTrace` and `stackTrace.parentId` field.
- optional StackTrace stackTrace
- # Console context descriptor for calls on non-default console context (not console.*):
- # 'anonymous#unique-logger-id' for call on unnamed context, 'name#unique-logger-id' for call
- # on named context.
- experimental optional string context
-
- # Issued when unhandled exception was revoked.
- event exceptionRevoked
- parameters
- # Reason describing why exception was revoked.
- string reason
- # The id of revoked exception, as reported in `exceptionThrown`.
- integer exceptionId
-
- # Issued when exception was thrown and unhandled.
- event exceptionThrown
- parameters
- # Timestamp of the exception.
- Timestamp timestamp
- ExceptionDetails exceptionDetails
-
- # Issued when new execution context is created.
- event executionContextCreated
- parameters
- # A newly created execution context.
- ExecutionContextDescription context
-
- # Issued when execution context is destroyed.
- event executionContextDestroyed
- parameters
- # Id of the destroyed context
- ExecutionContextId executionContextId
-
- # Issued when all executionContexts were cleared in the browser.
- event executionContextsCleared
-
- # Issued when object should be inspected (for example, as a result of inspect() command line API
- # call).
- event inspectRequested
- parameters
- RemoteObject object
- object hints
-
-# This domain is deprecated.
-deprecated domain Schema
-
- # Description of the protocol domain.
- type Domain extends object
- properties
- # Domain name.
- string name
- # Domain version.
- string version
-
- # Returns supported domains.
- command getDomains
- returns
- # List of supported domains.
- array of Domain domains
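
For orientation, the Runtime commands described in the removed definitions above are exchanged as JSON (or CBOR) messages that carry a request id, a method name such as Runtime.evaluate, and a params object; the reply echoes the id and wraps the result. A minimal sketch of such an exchange, with made-up ids and expression (only the field names come from the definitions above):

#include <cstdio>

int main() {
  // Client -> backend: evaluate an expression, returning the result by value.
  const char* request =
      R"({"id":1,"method":"Runtime.evaluate",)"
      R"("params":{"expression":"1 + 2","returnByValue":true}})";
  // Backend -> client: a RemoteObject describing the evaluation result.
  const char* response =
      R"({"id":1,"result":{"result":)"
      R"({"type":"number","value":3,"description":"3"}}})";
  std::printf("-> %s\n<- %s\n", request, response);
  return 0;
}
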
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index acf0159f27..3a91169ac8 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -200,6 +200,23 @@ String16 String16::fromUTF8(const char* stringStart, size_t length) {
return String16(UTF8ToUTF16(stringStart, length));
}
+String16 String16::fromUTF16LE(const UChar* stringStart, size_t length) {
+#ifdef V8_TARGET_BIG_ENDIAN
+ // Need to flip the byte order on big endian machines.
+ String16Builder builder;
+ builder.reserveCapacity(length);
+ for (size_t i = 0; i < length; i++) {
+ const UChar utf16be_char =
+ stringStart[i] << 8 | (stringStart[i] >> 8 & 0x00FF);
+ builder.append(utf16be_char);
+ }
+ return builder.toString();
+#else
+ // No need to do anything on little endian machines.
+ return String16(stringStart, length);
+#endif // V8_TARGET_BIG_ENDIAN
+}
+
std::string String16::utf8() const {
return UTF16ToUTF8(m_impl.data(), m_impl.size());
}
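
For context on the byte swap above: on a big-endian target every 16-bit code unit of UTF-16LE input has its two bytes exchanged to reach native order, while little-endian targets use the input as-is. A self-contained sketch of the same transformation on a plain uint16_t buffer (stand-in types, not the inspector's UChar/String16Builder):

#include <cstddef>
#include <cstdint>
#include <vector>

// Swap each 16-bit code unit of UTF-16LE input into big-endian (native) order,
// mirroring the loop in String16::fromUTF16LE.
std::vector<uint16_t> Utf16LeToBigEndian(const uint16_t* data, size_t length) {
  std::vector<uint16_t> out;
  out.reserve(length);
  for (size_t i = 0; i < length; ++i) {
    const uint16_t unit = data[i];
    out.push_back(static_cast<uint16_t>((unit << 8) | (unit >> 8)));
  }
  return out;
}
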
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 1b475a10a6..c1dd5cb929 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -26,7 +26,7 @@ class String16 {
String16(const String16&) V8_NOEXCEPT = default;
String16(String16&&) V8_NOEXCEPT = default;
String16(const UChar* characters, size_t size);
- String16(const UChar* characters); // NOLINT(runtime/explicit)
+ V8_EXPORT String16(const UChar* characters); // NOLINT(runtime/explicit)
String16(const char* characters); // NOLINT(runtime/explicit)
String16(const char* characters, size_t size);
explicit String16(const std::basic_string<UChar>& impl);
@@ -66,8 +66,13 @@ class String16 {
}
// Convenience methods.
- std::string utf8() const;
- static String16 fromUTF8(const char* stringStart, size_t length);
+ V8_EXPORT std::string utf8() const;
+ V8_EXPORT static String16 fromUTF8(const char* stringStart, size_t length);
+
+ // Instantiates a String16 in native endianness from UTF16 LE.
+ // On Big endian architectures, byte order needs to be flipped.
+ V8_EXPORT static String16 fromUTF16LE(const UChar* stringStart,
+ size_t length);
std::size_t hash() const {
if (!hash_code) {
@@ -91,6 +96,10 @@ class String16 {
inline String16 operator+(const String16& other) const {
return String16(m_impl + other.m_impl);
}
+ inline String16& operator+=(const String16& other) {
+ m_impl += other.m_impl;
+ return *this;
+ }
// Defined later, since it uses the String16Builder.
template <typename... T>
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index e81c04d66f..20c8951e2a 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -126,12 +126,6 @@ std::unique_ptr<protocol::Value> StringUtil::parseJSON(const String16& string) {
}
// static
-std::unique_ptr<protocol::Value> StringUtil::parseProtocolMessage(
- const ProtocolMessage& message) {
- return parseJSON(message.json);
-}
-
-// static
ProtocolMessage StringUtil::jsonToMessage(String message) {
ProtocolMessage result;
result.json = std::move(message);
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index a9ce4ff424..513f436136 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -45,26 +45,33 @@ class StringUtil {
return s.find(needle);
}
static const size_t kNotFound = String::kNotFound;
- static void builderAppend(StringBuilder& builder, const String& s) {
+ static void builderAppend(
+ StringBuilder& builder, // NOLINT(runtime/references)
+ const String& s) {
builder.append(s);
}
- static void builderAppend(StringBuilder& builder, UChar c) {
+ static void builderAppend(
+ StringBuilder& builder, // NOLINT(runtime/references)
+ UChar c) {
builder.append(c);
}
- static void builderAppend(StringBuilder& builder, const char* s, size_t len) {
+ static void builderAppend(
+ StringBuilder& builder, // NOLINT(runtime/references)
+ const char* s, size_t len) {
builder.append(s, len);
}
static void builderAppendQuotedString(StringBuilder&, const String&);
- static void builderReserve(StringBuilder& builder, size_t capacity) {
+ static void builderReserve(
+ StringBuilder& builder, // NOLINT(runtime/references)
+ size_t capacity) {
builder.reserveCapacity(capacity);
}
- static String builderToString(StringBuilder& builder) {
+ static String builderToString(
+ StringBuilder& builder) { // NOLINT(runtime/references)
return builder.toString();
}
static std::unique_ptr<protocol::Value> parseJSON(const String16& json);
static std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
- static std::unique_ptr<protocol::Value> parseProtocolMessage(
- const ProtocolMessage&);
static ProtocolMessage jsonToMessage(String message);
static ProtocolMessage binaryToMessage(std::vector<uint8_t> message);
@@ -76,6 +83,10 @@ class StringUtil {
return String16(data, length);
}
+ static String fromUTF16LE(const uint16_t* data, size_t length) {
+ return String16::fromUTF16LE(data, length);
+ }
+
static const uint8_t* CharactersLatin1(const String& s) { return nullptr; }
static const uint8_t* CharactersUTF8(const String& s) { return nullptr; }
static const uint16_t* CharactersUTF16(const String& s) {
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 4b7f181e35..458e4d4027 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -257,8 +257,8 @@ V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session,
v8::HandleScope handles(isolate);
v8::Local<v8::Context> context = inspectedContext->context();
- std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>> args =
- protocol::Array<protocol::Runtime::RemoteObject>::create();
+ auto args =
+ v8::base::make_unique<protocol::Array<protocol::Runtime::RemoteObject>>();
v8::Local<v8::Value> value = m_arguments[0]->Get(isolate);
if (value->IsObject() && m_type == ConsoleAPIType::kTable &&
@@ -282,7 +282,7 @@ V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session,
inspectedContext = inspector->getContext(contextGroupId, contextId);
if (!inspectedContext) return nullptr;
if (wrapped) {
- args->addItem(std::move(wrapped));
+ args->emplace_back(std::move(wrapped));
} else {
args = nullptr;
}
@@ -297,7 +297,7 @@ V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session,
args = nullptr;
break;
}
- args->addItem(std::move(wrapped));
+ args->emplace_back(std::move(wrapped));
}
}
return args;
@@ -341,14 +341,15 @@ void V8ConsoleMessage::reportToFrontend(protocol::Runtime::Frontend* frontend,
arguments = wrapArguments(session, generatePreview);
if (!inspector->hasConsoleMessageStorage(contextGroupId)) return;
if (!arguments) {
- arguments = protocol::Array<protocol::Runtime::RemoteObject>::create();
+ arguments = v8::base::make_unique<
+ protocol::Array<protocol::Runtime::RemoteObject>>();
if (!m_message.isEmpty()) {
std::unique_ptr<protocol::Runtime::RemoteObject> messageArg =
protocol::Runtime::RemoteObject::create()
.setType(protocol::Runtime::RemoteObject::TypeEnum::String)
.build();
messageArg->setValue(protocol::StringValue::create(m_message));
- arguments->addItem(std::move(messageArg));
+ arguments->emplace_back(std::move(messageArg));
}
}
Maybe<String16> consoleContext;
@@ -426,9 +427,11 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
message->m_v8Size +=
v8::debug::EstimatedValueSize(isolate, arguments.at(i));
}
- if (arguments.size())
- message->m_message =
- V8ValueStringBuilder::toString(arguments[0], v8Context);
+ for (size_t i = 0, num_args = arguments.size(); i < num_args; ++i) {
+ if (i) message->m_message += String16(" ");
+ message->m_message +=
+ V8ValueStringBuilder::toString(arguments[i], v8Context);
+ }
v8::Isolate::MessageErrorLevel clientLevel = v8::Isolate::kMessageInfo;
if (type == ConsoleAPIType::kDebug || type == ConsoleAPIType::kCount ||
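
One behavioral change in this file: createForConsoleAPI now stringifies every console argument and joins them with single spaces, where previously only the first argument became m_message. A small sketch of the joining step, assuming the arguments have already been stringified (the real code goes through V8ValueStringBuilder::toString):

#include <cstddef>
#include <string>
#include <vector>

// Join already-stringified console arguments with single spaces.
std::string JoinConsoleArguments(const std::vector<std::string>& args) {
  std::string message;
  for (size_t i = 0; i < args.size(); ++i) {
    if (i) message += " ";
    message += args[i];
  }
  return message;
}

With this, console.log("a", 1, true) yields the message "a 1 true" rather than just "a".
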
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 37b1d5c7a9..0f476f2316 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -496,11 +496,11 @@ void V8Console::valuesCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
info.GetReturnValue().Set(values);
}
-static void setFunctionBreakpoint(ConsoleHelper& helper, int sessionId,
- v8::Local<v8::Function> function,
- V8DebuggerAgentImpl::BreakpointSource source,
- v8::Local<v8::String> condition,
- bool enable) {
+static void setFunctionBreakpoint(
+ ConsoleHelper& helper, // NOLINT(runtime/references)
+ int sessionId, v8::Local<v8::Function> function,
+ V8DebuggerAgentImpl::BreakpointSource source,
+ v8::Local<v8::String> condition, bool enable) {
V8InspectorSessionImpl* session = helper.session(sessionId);
if (session == nullptr) return;
if (!session->debuggerAgent()->enabled()) return;
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 3301838587..e5458823ea 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -262,7 +262,7 @@ String16 scopeType(v8::debug::ScopeIterator::ScopeType type) {
Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator,
InjectedScript* injectedScript,
std::unique_ptr<Array<Scope>>* scopes) {
- *scopes = Array<Scope>::create();
+ *scopes = v8::base::make_unique<Array<Scope>>();
if (!injectedScript) return Response::OK();
if (iterator->Done()) return Response::OK();
@@ -299,7 +299,7 @@ Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator,
.setColumnNumber(end.GetColumnNumber())
.build());
}
- (*scopes)->addItem(std::move(scope));
+ (*scopes)->emplace_back(std::move(scope));
}
return Response::OK();
}
@@ -472,7 +472,7 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
Maybe<int> optionalColumnNumber, Maybe<String16> optionalCondition,
String16* outBreakpointId,
std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
- *locations = Array<protocol::Debugger::Location>::create();
+ *locations = v8::base::make_unique<Array<protocol::Debugger::Location>>();
int specified = (optionalURL.isJust() ? 1 : 0) +
(optionalURLRegex.isJust() ? 1 : 0) +
@@ -539,7 +539,7 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
if (location && type != BreakpointType::kByUrlRegex) {
hint = breakpointHint(*script.second, lineNumber, columnNumber);
}
- if (location) (*locations)->addItem(std::move(location));
+ if (location) (*locations)->emplace_back(std::move(location));
}
breakpoints->setString(breakpointId, condition);
if (!hint.isEmpty()) {
@@ -708,7 +708,8 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
v8Start, v8End, restrictToFunction.fromMaybe(false), &v8Locations);
}
- *locations = protocol::Array<protocol::Debugger::BreakLocation>::create();
+ *locations = v8::base::make_unique<
+ protocol::Array<protocol::Debugger::BreakLocation>>();
for (size_t i = 0; i < v8Locations.size(); ++i) {
std::unique_ptr<protocol::Debugger::BreakLocation> breakLocation =
protocol::Debugger::BreakLocation::create()
@@ -719,7 +720,7 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
if (v8Locations[i].type() != v8::debug::kCommonBreakLocation) {
breakLocation->setType(breakLocationType(v8Locations[i].type()));
}
- (*locations)->addItem(std::move(breakLocation));
+ (*locations)->emplace_back(std::move(breakLocation));
}
return Response::OK();
}
@@ -871,13 +872,11 @@ Response V8DebuggerAgentImpl::searchInContent(
if (it == m_scripts.end())
return Response::Error("No script for id: " + scriptId);
- std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
- searchInTextByLinesImpl(m_session, it->second->source(0), query,
- optionalCaseSensitive.fromMaybe(false),
- optionalIsRegex.fromMaybe(false));
- *results = protocol::Array<protocol::Debugger::SearchMatch>::create();
- for (size_t i = 0; i < matches.size(); ++i)
- (*results)->addItem(std::move(matches[i]));
+ *results =
+ v8::base::make_unique<protocol::Array<protocol::Debugger::SearchMatch>>(
+ searchInTextByLinesImpl(m_session, it->second->source(0), query,
+ optionalCaseSensitive.fromMaybe(false),
+ optionalIsRegex.fromMaybe(false)));
return Response::OK();
}
@@ -1190,7 +1189,7 @@ Response V8DebuggerAgentImpl::setAsyncCallStackDepth(int depth) {
Response V8DebuggerAgentImpl::setBlackboxPatterns(
std::unique_ptr<protocol::Array<String16>> patterns) {
- if (!patterns->length()) {
+ if (patterns->empty()) {
m_blackboxPattern = nullptr;
resetBlackboxedStateCache();
m_state->remove(DebuggerAgentState::blackboxPattern);
@@ -1199,11 +1198,11 @@ Response V8DebuggerAgentImpl::setBlackboxPatterns(
String16Builder patternBuilder;
patternBuilder.append('(');
- for (size_t i = 0; i < patterns->length() - 1; ++i) {
- patternBuilder.append(patterns->get(i));
+ for (size_t i = 0; i < patterns->size() - 1; ++i) {
+ patternBuilder.append((*patterns)[i]);
patternBuilder.append("|");
}
- patternBuilder.append(patterns->get(patterns->length() - 1));
+ patternBuilder.append(patterns->back());
patternBuilder.append(')');
String16 pattern = patternBuilder.toString();
Response response = setBlackboxPattern(pattern);
@@ -1236,16 +1235,16 @@ Response V8DebuggerAgentImpl::setBlackboxedRanges(
if (it == m_scripts.end())
return Response::Error("No script with passed id.");
- if (!inPositions->length()) {
+ if (inPositions->empty()) {
m_blackboxedPositions.erase(scriptId);
it->second->resetBlackboxedStateCache();
return Response::OK();
}
std::vector<std::pair<int, int>> positions;
- positions.reserve(inPositions->length());
- for (size_t i = 0; i < inPositions->length(); ++i) {
- protocol::Debugger::ScriptPosition* position = inPositions->get(i);
+ positions.reserve(inPositions->size());
+ for (const std::unique_ptr<protocol::Debugger::ScriptPosition>& position :
+ *inPositions) {
if (position->getLineNumber() < 0)
return Response::Error("Position missing 'line' or 'line' < 0.");
if (position->getColumnNumber() < 0)
@@ -1271,11 +1270,11 @@ Response V8DebuggerAgentImpl::setBlackboxedRanges(
Response V8DebuggerAgentImpl::currentCallFrames(
std::unique_ptr<Array<CallFrame>>* result) {
if (!isPaused()) {
- *result = Array<CallFrame>::create();
+ *result = v8::base::make_unique<Array<CallFrame>>();
return Response::OK();
}
v8::HandleScope handles(m_isolate);
- *result = Array<CallFrame>::create();
+ *result = v8::base::make_unique<Array<CallFrame>>();
auto iterator = v8::debug::StackTraceIterator::Create(m_isolate);
int frameOrdinal = 0;
for (; !iterator->Done(); iterator->Advance(), frameOrdinal++) {
@@ -1354,7 +1353,7 @@ Response V8DebuggerAgentImpl::currentCallFrames(
if (!res.isSuccess()) return res;
frame->setReturnValue(std::move(value));
}
- (*result)->addItem(std::move(frame));
+ (*result)->emplace_back(std::move(frame));
}
return Response::OK();
}
@@ -1603,7 +1602,7 @@ void V8DebuggerAgentImpl::didPause(
}
}
- std::unique_ptr<Array<String16>> hitBreakpointIds = Array<String16>::create();
+ auto hitBreakpointIds = v8::base::make_unique<Array<String16>>();
for (const auto& id : hitBreakpoints) {
auto it = m_breakpointsOnScriptRun.find(id);
@@ -1619,7 +1618,7 @@ void V8DebuggerAgentImpl::didPause(
continue;
}
const String16& breakpointId = breakpointIterator->second;
- hitBreakpointIds->addItem(breakpointId);
+ hitBreakpointIds->emplace_back(breakpointId);
BreakpointType type;
parseBreakpointId(breakpointId, &type);
if (type != BreakpointType::kDebugCommand) continue;
@@ -1655,7 +1654,8 @@ void V8DebuggerAgentImpl::didPause(
std::unique_ptr<Array<CallFrame>> protocolCallFrames;
Response response = currentCallFrames(&protocolCallFrames);
- if (!response.isSuccess()) protocolCallFrames = Array<CallFrame>::create();
+ if (!response.isSuccess())
+ protocolCallFrames = v8::base::make_unique<Array<CallFrame>>();
m_frontend.paused(std::move(protocolCallFrames), breakReason,
std::move(breakAuxData), std::move(hitBreakpointIds),
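
The recurring pattern in these hunks is a container migration: protocol::Array<T> is now backed by a std::vector (holding values directly for primitives and std::unique_ptr for protocol objects), so the old create()/addItem()/length()/get() calls become make_unique/emplace_back/size()/operator[]. A sketch of the new shape with a stand-in element type (the patch uses v8::base::make_unique, an equivalent of std::make_unique):

#include <memory>
#include <string>
#include <vector>

// Stand-in for a generated protocol object type such as Debugger::Location.
struct Location {
  std::string scriptId;
  int lineNumber;
};

int main() {
  // protocol::Array<Location> is now essentially this vector of unique_ptrs.
  auto locations = std::make_unique<std::vector<std::unique_ptr<Location>>>();
  locations->emplace_back(std::make_unique<Location>(Location{"42", 10}));
  locations->emplace_back(std::make_unique<Location>(Location{"42", 17}));
  return locations->empty() ? 1 : 0;
}
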
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index fe7d570942..b83eafc96a 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -4,7 +4,7 @@
#include "src/inspector/v8-debugger-script.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger-agent-impl.h"
@@ -44,7 +44,7 @@ String16 calculateHash(v8::Isolate* isolate, v8::Local<v8::String> source) {
size_t sizeInBytes = sizeof(UChar) * written;
data = reinterpret_cast<const uint32_t*>(buffer.get());
for (size_t i = 0; i < sizeInBytes / 4; ++i) {
- uint32_t d = v8::internal::ReadUnalignedUInt32(
+ uint32_t d = v8::base::ReadUnalignedValue<uint32_t>(
reinterpret_cast<v8::internal::Address>(data + i));
#if V8_TARGET_LITTLE_ENDIAN
uint32_t v = d;
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index bc0c9d8cf6..5ddc375a80 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -24,7 +24,7 @@ static const int kMaxAsyncTaskStacks = 128 * 1024;
static const int kNoBreakpointId = 0;
template <typename Map>
-void cleanupExpiredWeakPointers(Map& map) {
+void cleanupExpiredWeakPointers(Map& map) { // NOLINT(runtime/references)
for (auto it = map.begin(); it != map.end();) {
if (it->second.expired()) {
it = map.erase(it);
@@ -42,6 +42,7 @@ class MatchPrototypePredicate : public v8::debug::QueryObjectPredicate {
: m_inspector(inspector), m_context(context), m_prototype(prototype) {}
bool Filter(v8::Local<v8::Object> object) override {
+ if (object->IsModuleNamespaceObject()) return false;
v8::Local<v8::Context> objectContext =
v8::debug::GetCreationContext(object);
if (objectContext != m_context) return false;
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index b1d60877fe..fcee8a6ef3 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -4,6 +4,7 @@
#include "src/inspector/v8-heap-profiler-agent-impl.h"
+#include "src/base/template-utils.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
@@ -127,12 +128,11 @@ class HeapStatsStream final : public v8::OutputStream {
WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* updateData,
int count) override {
DCHECK_GT(count, 0);
- std::unique_ptr<protocol::Array<int>> statsDiff =
- protocol::Array<int>::create();
+ auto statsDiff = v8::base::make_unique<protocol::Array<int>>();
for (int i = 0; i < count; ++i) {
- statsDiff->addItem(updateData[i].index);
- statsDiff->addItem(updateData[i].count);
- statsDiff->addItem(updateData[i].size);
+ statsDiff->emplace_back(updateData[i].index);
+ statsDiff->emplace_back(updateData[i].count);
+ statsDiff->emplace_back(updateData[i].size);
}
m_frontend->heapStatsUpdate(std::move(statsDiff));
return kContinue;
@@ -337,10 +337,10 @@ namespace {
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfileNode>
buildSampingHeapProfileNode(v8::Isolate* isolate,
const v8::AllocationProfile::Node* node) {
- auto children = protocol::Array<
- protocol::HeapProfiler::SamplingHeapProfileNode>::create();
+ auto children = v8::base::make_unique<
+ protocol::Array<protocol::HeapProfiler::SamplingHeapProfileNode>>();
for (const auto* child : node->children)
- children->addItem(buildSampingHeapProfileNode(isolate, child));
+ children->emplace_back(buildSampingHeapProfileNode(isolate, child));
size_t selfSize = 0;
for (const auto& allocation : node->allocations)
selfSize += allocation.size * allocation.count;
@@ -384,14 +384,15 @@ Response V8HeapProfilerAgentImpl::getSamplingProfile(
if (!v8Profile)
return Response::Error("V8 sampling heap profiler was not started.");
v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
- auto samples = protocol::Array<
- protocol::HeapProfiler::SamplingHeapProfileSample>::create();
+ auto samples = v8::base::make_unique<
+ protocol::Array<protocol::HeapProfiler::SamplingHeapProfileSample>>();
for (const auto& sample : v8Profile->GetSamples()) {
- samples->addItem(protocol::HeapProfiler::SamplingHeapProfileSample::create()
- .setSize(sample.size * sample.count)
- .setNodeId(sample.node_id)
- .setOrdinal(static_cast<double>(sample.sample_id))
- .build());
+ samples->emplace_back(
+ protocol::HeapProfiler::SamplingHeapProfileSample::create()
+ .setSize(sample.size * sample.count)
+ .setNodeId(sample.node_id)
+ .setOrdinal(static_cast<double>(sample.sample_id))
+ .build());
}
*profile = protocol::HeapProfiler::SamplingHeapProfile::create()
.setHead(buildSampingHeapProfileNode(m_isolate, root))
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 4242abb64a..fdfb41924c 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -161,53 +161,28 @@ protocol::DictionaryValue* V8InspectorSessionImpl::agentState(
return state;
}
-namespace {
-
-class MessageBuffer : public StringBuffer {
- public:
- static std::unique_ptr<MessageBuffer> create(
- std::unique_ptr<protocol::Serializable> message, bool binary) {
- return std::unique_ptr<MessageBuffer>(
- new MessageBuffer(std::move(message), binary));
- }
-
- const StringView& string() override {
- if (!m_serialized) {
- if (m_binary) {
- // Encode binary response as an 8bit string buffer.
- m_serialized.reset(
- new BinaryStringBuffer(m_message->serializeToBinary()));
- } else {
- m_serialized =
- StringBuffer::create(toStringView(m_message->serializeToJSON()));
- }
- m_message.reset(nullptr);
- }
- return m_serialized->string();
- }
-
- private:
- explicit MessageBuffer(std::unique_ptr<protocol::Serializable> message,
- bool binary)
- : m_message(std::move(message)), m_binary(binary) {}
-
- std::unique_ptr<protocol::Serializable> m_message;
- std::unique_ptr<StringBuffer> m_serialized;
- bool m_binary;
-};
-
-} // namespace
+std::unique_ptr<StringBuffer> V8InspectorSessionImpl::serializeForFrontend(
+ std::unique_ptr<protocol::Serializable> message) {
+ std::vector<uint8_t> cbor = message->serializeToBinary();
+ if (use_binary_protocol_)
+ return std::unique_ptr<StringBuffer>(
+ new BinaryStringBuffer(std::move(cbor)));
+ std::vector<uint8_t> json;
+ IPEStatus status = ConvertCBORToJSON(SpanFrom(cbor), &json);
+ DCHECK(status.ok());
+ USE(status);
+ String16 string16(reinterpret_cast<const char*>(json.data()), json.size());
+ return StringBufferImpl::adopt(string16);
+}
void V8InspectorSessionImpl::sendProtocolResponse(
int callId, std::unique_ptr<protocol::Serializable> message) {
- m_channel->sendResponse(
- callId, MessageBuffer::create(std::move(message), use_binary_protocol_));
+ m_channel->sendResponse(callId, serializeForFrontend(std::move(message)));
}
void V8InspectorSessionImpl::sendProtocolNotification(
std::unique_ptr<protocol::Serializable> message) {
- m_channel->sendNotification(
- MessageBuffer::create(std::move(message), use_binary_protocol_));
+ m_channel->sendNotification(serializeForFrontend(std::move(message)));
}
void V8InspectorSessionImpl::fallThrough(
@@ -357,20 +332,30 @@ void V8InspectorSessionImpl::reportAllContexts(V8RuntimeAgentImpl* agent) {
void V8InspectorSessionImpl::dispatchProtocolMessage(
const StringView& message) {
- bool binary_protocol = IsCBORMessage(message);
- if (binary_protocol) {
+ using ::v8_inspector_protocol_encoding::span;
+ using ::v8_inspector_protocol_encoding::SpanFrom;
+ span<uint8_t> cbor;
+ std::vector<uint8_t> converted_cbor;
+ if (IsCBORMessage(message)) {
use_binary_protocol_ = true;
m_state->setBoolean("use_binary_protocol", true);
- }
-
- int callId;
- std::unique_ptr<protocol::Value> parsed_message;
- if (binary_protocol) {
- parsed_message = protocol::Value::parseBinary(
- message.characters8(), static_cast<unsigned>(message.length()));
+ cbor = span<uint8_t>(message.characters8(), message.length());
} else {
- parsed_message = protocol::StringUtil::parseJSON(message);
+ if (message.is8Bit()) {
+ // We're ignoring the return value of these conversion functions
+ // intentionally. It means the |parsed_message| below will be nullptr.
+ ConvertJSONToCBOR(span<uint8_t>(message.characters8(), message.length()),
+ &converted_cbor);
+ } else {
+ ConvertJSONToCBOR(
+ span<uint16_t>(message.characters16(), message.length()),
+ &converted_cbor);
+ }
+ cbor = SpanFrom(converted_cbor);
}
+ int callId;
+ std::unique_ptr<protocol::Value> parsed_message =
+ protocol::Value::parseBinary(cbor.data(), cbor.size());
String16 method;
if (m_dispatcher.parseCommand(parsed_message.get(), &callId, &method)) {
// Pass empty string instead of the actual message to save on a conversion.
@@ -380,14 +365,6 @@ void V8InspectorSessionImpl::dispatchProtocolMessage(
}
}
-std::unique_ptr<StringBuffer> V8InspectorSessionImpl::stateJSON() {
- std::vector<uint8_t> json;
- IPEStatus status = ConvertCBORToJSON(SpanFrom(state()), &json);
- DCHECK(status.ok());
- USE(status);
- return v8::base::make_unique<BinaryStringBuffer>(std::move(json));
-}
-
std::vector<uint8_t> V8InspectorSessionImpl::state() {
std::vector<uint8_t> out;
m_state->writeBinary(&out);
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index ea1d29773c..7a976bcd40 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -64,7 +64,6 @@ class V8InspectorSessionImpl : public V8InspectorSession,
// V8InspectorSession implementation.
void dispatchProtocolMessage(const StringView& message) override;
- std::unique_ptr<StringBuffer> stateJSON() override;
std::vector<uint8_t> state() override;
std::vector<std::unique_ptr<protocol::Schema::API::Domain>> supportedDomains()
override;
@@ -106,6 +105,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
const protocol::ProtocolMessage& message) override;
void flushProtocolNotifications() override;
+ std::unique_ptr<StringBuffer> serializeForFrontend(
+ std::unique_ptr<protocol::Serializable> message);
int m_contextGroupId;
int m_sessionId;
V8InspectorImpl* m_inspector;
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 15f93e39d7..3b02f7faa1 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -44,7 +44,8 @@ std::unique_ptr<protocol::Array<protocol::Profiler::PositionTickInfo>>
buildInspectorObjectForPositionTicks(const v8::CpuProfileNode* node) {
unsigned lineCount = node->GetHitLineCount();
if (!lineCount) return nullptr;
- auto array = protocol::Array<protocol::Profiler::PositionTickInfo>::create();
+ auto array = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::PositionTickInfo>>();
std::vector<v8::CpuProfileNode::LineTick> entries(lineCount);
if (node->GetLineTicks(&entries[0], lineCount)) {
for (unsigned i = 0; i < lineCount; i++) {
@@ -53,7 +54,7 @@ buildInspectorObjectForPositionTicks(const v8::CpuProfileNode* node) {
.setLine(entries[i].line)
.setTicks(entries[i].hit_count)
.build();
- array->addItem(std::move(line));
+ array->emplace_back(std::move(line));
}
}
return array;
@@ -79,9 +80,9 @@ std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor(
const int childrenCount = node->GetChildrenCount();
if (childrenCount) {
- auto children = protocol::Array<int>::create();
+ auto children = v8::base::make_unique<protocol::Array<int>>();
for (int i = 0; i < childrenCount; i++)
- children->addItem(node->GetChild(i)->GetNodeId());
+ children->emplace_back(node->GetChild(i)->GetNodeId());
result->setChildren(std::move(children));
}
@@ -97,21 +98,21 @@ std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor(
std::unique_ptr<protocol::Array<int>> buildInspectorObjectForSamples(
v8::CpuProfile* v8profile) {
- auto array = protocol::Array<int>::create();
+ auto array = v8::base::make_unique<protocol::Array<int>>();
int count = v8profile->GetSamplesCount();
for (int i = 0; i < count; i++)
- array->addItem(v8profile->GetSample(i)->GetNodeId());
+ array->emplace_back(v8profile->GetSample(i)->GetNodeId());
return array;
}
std::unique_ptr<protocol::Array<int>> buildInspectorObjectForTimestamps(
v8::CpuProfile* v8profile) {
- auto array = protocol::Array<int>::create();
+ auto array = v8::base::make_unique<protocol::Array<int>>();
int count = v8profile->GetSamplesCount();
uint64_t lastTime = v8profile->GetStartTime();
for (int i = 0; i < count; i++) {
uint64_t ts = v8profile->GetSampleTimestamp(i);
- array->addItem(static_cast<int>(ts - lastTime));
+ array->emplace_back(static_cast<int>(ts - lastTime));
lastTime = ts;
}
return array;
@@ -120,7 +121,7 @@ std::unique_ptr<protocol::Array<int>> buildInspectorObjectForTimestamps(
void flattenNodesTree(V8InspectorImpl* inspector,
const v8::CpuProfileNode* node,
protocol::Array<protocol::Profiler::ProfileNode>* list) {
- list->addItem(buildInspectorObjectFor(inspector, node));
+ list->emplace_back(buildInspectorObjectFor(inspector, node));
const int childrenCount = node->GetChildrenCount();
for (int i = 0; i < childrenCount; i++)
flattenNodesTree(inspector, node->GetChild(i), list);
@@ -128,7 +129,8 @@ void flattenNodesTree(V8InspectorImpl* inspector,
std::unique_ptr<protocol::Profiler::Profile> createCPUProfile(
V8InspectorImpl* inspector, v8::CpuProfile* v8profile) {
- auto nodes = protocol::Array<protocol::Profiler::ProfileNode>::create();
+ auto nodes =
+ v8::base::make_unique<protocol::Array<protocol::Profiler::ProfileNode>>();
flattenNodesTree(inspector, v8profile->GetTopDownRoot(), nodes.get());
return protocol::Profiler::Profile::create()
.setNodes(std::move(nodes))
@@ -336,36 +338,35 @@ Response coverageToProtocol(
V8InspectorImpl* inspector, const v8::debug::Coverage& coverage,
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
out_result) {
- std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>> result =
- protocol::Array<protocol::Profiler::ScriptCoverage>::create();
+ auto result = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::ScriptCoverage>>();
v8::Isolate* isolate = inspector->isolate();
for (size_t i = 0; i < coverage.ScriptCount(); i++) {
v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(i);
v8::Local<v8::debug::Script> script = script_data.GetScript();
- std::unique_ptr<protocol::Array<protocol::Profiler::FunctionCoverage>>
- functions =
- protocol::Array<protocol::Profiler::FunctionCoverage>::create();
+ auto functions = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::FunctionCoverage>>();
for (size_t j = 0; j < script_data.FunctionCount(); j++) {
v8::debug::Coverage::FunctionData function_data =
script_data.GetFunctionData(j);
- std::unique_ptr<protocol::Array<protocol::Profiler::CoverageRange>>
- ranges = protocol::Array<protocol::Profiler::CoverageRange>::create();
+ auto ranges = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::CoverageRange>>();
// Add function range.
- ranges->addItem(createCoverageRange(function_data.StartOffset(),
- function_data.EndOffset(),
- function_data.Count()));
+ ranges->emplace_back(createCoverageRange(function_data.StartOffset(),
+ function_data.EndOffset(),
+ function_data.Count()));
// Process inner blocks.
for (size_t k = 0; k < function_data.BlockCount(); k++) {
v8::debug::Coverage::BlockData block_data =
function_data.GetBlockData(k);
- ranges->addItem(createCoverageRange(block_data.StartOffset(),
- block_data.EndOffset(),
- block_data.Count()));
+ ranges->emplace_back(createCoverageRange(block_data.StartOffset(),
+ block_data.EndOffset(),
+ block_data.Count()));
}
- functions->addItem(
+ functions->emplace_back(
protocol::Profiler::FunctionCoverage::create()
.setFunctionName(toProtocolString(
isolate,
@@ -381,11 +382,11 @@ Response coverageToProtocol(
} else if (script->Name().ToLocal(&name) && name->Length()) {
url = resourceNameToUrl(inspector, name);
}
- result->addItem(protocol::Profiler::ScriptCoverage::create()
- .setScriptId(String16::fromInteger(script->Id()))
- .setUrl(url)
- .setFunctions(std::move(functions))
- .build());
+ result->emplace_back(protocol::Profiler::ScriptCoverage::create()
+ .setScriptId(String16::fromInteger(script->Id()))
+ .setUrl(url)
+ .setFunctions(std::move(functions))
+ .build());
}
*out_result = std::move(result);
return Response::OK();
@@ -417,31 +418,30 @@ namespace {
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>
typeProfileToProtocol(V8InspectorImpl* inspector,
const v8::debug::TypeProfile& type_profile) {
- std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>
- result = protocol::Array<protocol::Profiler::ScriptTypeProfile>::create();
+ auto result = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::ScriptTypeProfile>>();
v8::Isolate* isolate = inspector->isolate();
for (size_t i = 0; i < type_profile.ScriptCount(); i++) {
v8::debug::TypeProfile::ScriptData script_data =
type_profile.GetScriptData(i);
v8::Local<v8::debug::Script> script = script_data.GetScript();
- std::unique_ptr<protocol::Array<protocol::Profiler::TypeProfileEntry>>
- entries =
- protocol::Array<protocol::Profiler::TypeProfileEntry>::create();
+ auto entries = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::TypeProfileEntry>>();
for (const auto& entry : script_data.Entries()) {
- std::unique_ptr<protocol::Array<protocol::Profiler::TypeObject>> types =
- protocol::Array<protocol::Profiler::TypeObject>::create();
+ auto types = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::TypeObject>>();
for (const auto& type : entry.Types()) {
- types->addItem(
+ types->emplace_back(
protocol::Profiler::TypeObject::create()
.setName(toProtocolString(
isolate, type.FromMaybe(v8::Local<v8::String>())))
.build());
}
- entries->addItem(protocol::Profiler::TypeProfileEntry::create()
- .setOffset(entry.SourcePosition())
- .setTypes(std::move(types))
- .build());
+ entries->emplace_back(protocol::Profiler::TypeProfileEntry::create()
+ .setOffset(entry.SourcePosition())
+ .setTypes(std::move(types))
+ .build());
}
String16 url;
v8::Local<v8::String> name;
@@ -450,11 +450,11 @@ typeProfileToProtocol(V8InspectorImpl* inspector,
} else if (script->Name().ToLocal(&name) && name->Length()) {
url = resourceNameToUrl(inspector, name);
}
- result->addItem(protocol::Profiler::ScriptTypeProfile::create()
- .setScriptId(String16::fromInteger(script->Id()))
- .setUrl(url)
- .setEntries(std::move(entries))
- .build());
+ result->emplace_back(protocol::Profiler::ScriptTypeProfile::create()
+ .setScriptId(String16::fromInteger(script->Id()))
+ .setUrl(url)
+ .setEntries(std::move(entries))
+ .build());
}
return result;
}
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 601f263c7a..fd2d35abd7 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -107,7 +107,8 @@ bool wrapEvaluateResultAsync(InjectedScript* injectedScript,
}
void innerCallFunctionOn(
- V8InspectorSessionImpl* session, InjectedScript::Scope& scope,
+ V8InspectorSessionImpl* session,
+ InjectedScript::Scope& scope, // NOLINT(runtime/references)
v8::Local<v8::Value> recv, const String16& expression,
Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
bool silent, WrapMode wrapMode, bool userGesture, bool awaitPromise,
@@ -120,12 +121,12 @@ void innerCallFunctionOn(
if (optionalArguments.isJust()) {
protocol::Array<protocol::Runtime::CallArgument>* arguments =
optionalArguments.fromJust();
- argc = static_cast<int>(arguments->length());
+ argc = static_cast<int>(arguments->size());
argv.reset(new v8::Local<v8::Value>[argc]);
for (int i = 0; i < argc; ++i) {
v8::Local<v8::Value> argumentValue;
Response response = scope.injectedScript()->resolveCallArgument(
- arguments->get(i), &argumentValue);
+ (*arguments)[i].get(), &argumentValue);
if (!response.isSuccess()) {
callback->sendFailure(response);
return;
@@ -419,9 +420,9 @@ Response V8RuntimeAgentImpl::getProperties(
object, scope.objectGroupName(), &internalPropertiesProtocolArray,
&privatePropertiesProtocolArray);
if (!response.isSuccess()) return response;
- if (internalPropertiesProtocolArray->length())
+ if (!internalPropertiesProtocolArray->empty())
*internalProperties = std::move(internalPropertiesProtocolArray);
- if (privatePropertiesProtocolArray->length())
+ if (!privatePropertiesProtocolArray->empty())
*privateProperties = std::move(privatePropertiesProtocolArray);
return Response::OK();
}
@@ -612,9 +613,9 @@ Response V8RuntimeAgentImpl::globalLexicalScopeNames(
v8::PersistentValueVector<v8::String> names(m_inspector->isolate());
v8::debug::GlobalLexicalScopeNames(scope.context(), &names);
- *outNames = protocol::Array<String16>::create();
+ *outNames = v8::base::make_unique<protocol::Array<String16>>();
for (size_t i = 0; i < names.Size(); ++i) {
- (*outNames)->addItem(
+ (*outNames)->emplace_back(
toProtocolString(m_inspector->isolate(), names.Get(i)));
}
return Response::OK();
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.cc b/deps/v8/src/inspector/v8-schema-agent-impl.cc
index 07bbd35d97..808f59b0bf 100644
--- a/deps/v8/src/inspector/v8-schema-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.cc
@@ -4,6 +4,7 @@
#include "src/inspector/v8-schema-agent-impl.h"
+#include "src/base/template-utils.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/v8-inspector-session-impl.h"
@@ -18,11 +19,9 @@ V8SchemaAgentImpl::~V8SchemaAgentImpl() = default;
Response V8SchemaAgentImpl::getDomains(
std::unique_ptr<protocol::Array<protocol::Schema::Domain>>* result) {
- std::vector<std::unique_ptr<protocol::Schema::Domain>> domains =
- m_session->supportedDomainsImpl();
- *result = protocol::Array<protocol::Schema::Domain>::create();
- for (size_t i = 0; i < domains.size(); ++i)
- (*result)->addItem(std::move(domains[i]));
+ *result = v8::base::make_unique<
+ std::vector<std::unique_ptr<protocol::Schema::Domain>>>(
+ m_session->supportedDomainsImpl());
return Response::OK();
}
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 8bf16b4baf..e2be811069 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -6,6 +6,7 @@
#include <algorithm>
+#include "src/base/template-utils.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/wasm-translation.h"
@@ -72,13 +73,13 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon(
return asyncParent->buildInspectorObject(debugger, maxAsyncDepth);
}
- std::unique_ptr<protocol::Array<protocol::Runtime::CallFrame>>
- inspectorFrames = protocol::Array<protocol::Runtime::CallFrame>::create();
- for (size_t i = 0; i < frames.size(); i++) {
+ auto inspectorFrames =
+ v8::base::make_unique<protocol::Array<protocol::Runtime::CallFrame>>();
+ for (const std::shared_ptr<StackFrame>& frame : frames) {
V8InspectorClient* client = nullptr;
if (debugger && debugger->inspector())
client = debugger->inspector()->client();
- inspectorFrames->addItem(frames[i]->buildInspectorObject(client));
+ inspectorFrames->emplace_back(frame->buildInspectorObject(client));
}
std::unique_ptr<protocol::Runtime::StackTrace> stackTrace =
protocol::Runtime::StackTrace::create()
@@ -284,6 +285,12 @@ V8StackTraceImpl::buildInspectorObject() const {
return buildInspectorObjectImpl(nullptr);
}
+std::unique_ptr<protocol::Runtime::API::StackTrace>
+V8StackTraceImpl::buildInspectorObject(int maxAsyncDepth) const {
+ return buildInspectorObjectImpl(nullptr,
+ std::min(maxAsyncDepth, m_maxAsyncDepth));
+}
+
std::unique_ptr<StringBuffer> V8StackTraceImpl::toString() const {
String16Builder stackTrace;
for (size_t i = 0; i < m_frames.size(); ++i) {
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index 1142cfaa82..681b3c2aba 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -78,6 +78,8 @@ class V8StackTraceImpl : public V8StackTrace {
StringView topFunctionName() const override;
std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject()
const override;
+ std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject(
+ int maxAsyncDepth) const override;
std::unique_ptr<StringBuffer> toString() const override;
bool isEqualIgnoringTopFrame(V8StackTraceImpl* stackTrace) const;
diff --git a/deps/v8/src/inspector/v8-string-conversions.cc b/deps/v8/src/inspector/v8-string-conversions.cc
index 0c75e66b97..4ccf6351fb 100644
--- a/deps/v8/src/inspector/v8-string-conversions.cc
+++ b/deps/v8/src/inspector/v8-string-conversions.cc
@@ -228,7 +228,9 @@ static const UChar32 offsetsFromUTF8[6] = {0x00000000UL,
static_cast<UChar32>(0xFA082080UL),
static_cast<UChar32>(0x82082080UL)};
-static inline UChar32 readUTF8Sequence(const char*& sequence, size_t length) {
+static inline UChar32 readUTF8Sequence(
+ const char*& sequence, // NOLINT(runtime/references)
+ size_t length) {
UChar32 character = 0;
// The cases all fall through.
@@ -334,7 +336,8 @@ ConversionResult convertUTF8ToUTF16(const char** sourceStart,
// Helper to write a three-byte UTF-8 code point to the buffer, caller must
// check room is available.
-static inline void putUTF8Triple(char*& buffer, UChar ch) {
+static inline void putUTF8Triple(char*& buffer, // NOLINT(runtime/references)
+ UChar ch) {
*buffer++ = static_cast<char>(((ch >> 12) & 0x0F) | 0xE0);
*buffer++ = static_cast<char>(((ch >> 6) & 0x3F) | 0x80);
*buffer++ = static_cast<char>((ch & 0x3F) | 0x80);
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 72eef3cd3f..3ab9085c44 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -351,7 +351,8 @@ class PrimitiveValueMirror final : public ValueMirror {
.setType(m_type)
.setDescription(descriptionForPrimitiveType(context, m_value))
.setOverflow(false)
- .setProperties(protocol::Array<PropertyPreview>::create())
+ .setProperties(
+ v8::base::make_unique<protocol::Array<PropertyPreview>>())
.build();
if (m_value->IsNull())
(*preview)->setSubtype(RemoteObject::SubtypeEnum::Null);
@@ -411,12 +412,14 @@ class NumberMirror final : public ValueMirror {
v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
std::unique_ptr<ObjectPreview>* preview) const override {
bool unserializable = false;
- *preview = ObjectPreview::create()
- .setType(RemoteObject::TypeEnum::Number)
- .setDescription(description(&unserializable))
- .setOverflow(false)
- .setProperties(protocol::Array<PropertyPreview>::create())
- .build();
+ *preview =
+ ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Number)
+ .setDescription(description(&unserializable))
+ .setOverflow(false)
+ .setProperties(
+ v8::base::make_unique<protocol::Array<PropertyPreview>>())
+ .build();
}
private:
@@ -467,12 +470,14 @@ class BigIntMirror final : public ValueMirror {
int* indexLimit,
std::unique_ptr<protocol::Runtime::ObjectPreview>*
preview) const override {
- *preview = ObjectPreview::create()
- .setType(RemoteObject::TypeEnum::Bigint)
- .setDescription(descriptionForBigInt(context, m_value))
- .setOverflow(false)
- .setProperties(protocol::Array<PropertyPreview>::create())
- .build();
+ *preview =
+ ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Bigint)
+ .setDescription(descriptionForBigInt(context, m_value))
+ .setOverflow(false)
+ .setProperties(
+ v8::base::make_unique<protocol::Array<PropertyPreview>>())
+ .build();
}
v8::Local<v8::Value> v8Value() const override { return m_value; }
@@ -625,12 +630,14 @@ class FunctionMirror final : public ValueMirror {
void buildEntryPreview(
v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
std::unique_ptr<ObjectPreview>* preview) const override {
- *preview = ObjectPreview::create()
- .setType(RemoteObject::TypeEnum::Function)
- .setDescription(descriptionForFunction(context, m_value))
- .setOverflow(false)
- .setProperties(protocol::Array<PropertyPreview>::create())
- .build();
+ *preview =
+ ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Function)
+ .setDescription(descriptionForFunction(context, m_value))
+ .setOverflow(false)
+ .setProperties(
+ v8::base::make_unique<protocol::Array<PropertyPreview>>())
+ .build();
}
private:
@@ -824,7 +831,7 @@ void getPrivatePropertiesForPreview(
return;
}
--*nameLimit;
- privateProperties->addItem(std::move(propertyPreview));
+ privateProperties->emplace_back(std::move(propertyPreview));
}
}
@@ -911,8 +918,7 @@ class ObjectMirror final : public ValueMirror {
v8::Local<v8::Context> context, bool forEntry,
bool generatePreviewForTable, int* nameLimit, int* indexLimit,
std::unique_ptr<ObjectPreview>* result) const {
- std::unique_ptr<protocol::Array<PropertyPreview>> properties =
- protocol::Array<PropertyPreview>::create();
+ auto properties = v8::base::make_unique<protocol::Array<PropertyPreview>>();
std::unique_ptr<protocol::Array<EntryPreview>> entriesPreview;
bool overflow = false;
@@ -929,7 +935,7 @@ class ObjectMirror final : public ValueMirror {
internalProperties[i].value->buildPropertyPreview(
context, internalProperties[i].name, &propertyPreview);
if (propertyPreview) {
- properties->addItem(std::move(propertyPreview));
+ properties->emplace_back(std::move(propertyPreview));
}
}
@@ -959,7 +965,7 @@ class ObjectMirror final : public ValueMirror {
if (valuePreview) {
preview->setValuePreview(std::move(valuePreview));
}
- properties->addItem(std::move(preview));
+ properties->emplace_back(std::move(preview));
}
}
@@ -969,7 +975,8 @@ class ObjectMirror final : public ValueMirror {
if (forEntry) {
overflow = true;
} else {
- entriesPreview = protocol::Array<EntryPreview>::create();
+ entriesPreview =
+ v8::base::make_unique<protocol::Array<EntryPreview>>();
for (const auto& entry : entries) {
std::unique_ptr<ObjectPreview> valuePreview;
entry.value->buildEntryPreview(context, nameLimit, indexLimit,
@@ -986,7 +993,7 @@ class ObjectMirror final : public ValueMirror {
.setValue(std::move(valuePreview))
.build();
if (keyPreview) entryPreview->setKey(std::move(keyPreview));
- entriesPreview->addItem(std::move(entryPreview));
+ entriesPreview->emplace_back(std::move(entryPreview));
}
}
}
@@ -1145,19 +1152,28 @@ void addTypedArrayViews(v8::Local<v8::Context> context,
v8::Local<ArrayBuffer> buffer,
ValueMirror::PropertyAccumulator* accumulator) {
// TODO(alph): these should be internal properties.
- size_t length = buffer->ByteLength();
+ // TODO(v8:9308): Reconsider how large arrays are previewed.
+ const size_t byte_length = buffer->ByteLength();
+
+ size_t length = byte_length;
+ if (length > v8::TypedArray::kMaxLength) return;
+
addTypedArrayView<v8::Int8Array>(context, buffer, length, "[[Int8Array]]",
accumulator);
addTypedArrayView<v8::Uint8Array>(context, buffer, length, "[[Uint8Array]]",
accumulator);
- if (buffer->ByteLength() % 2 == 0) {
- addTypedArrayView<v8::Int16Array>(context, buffer, length / 2,
- "[[Int16Array]]", accumulator);
- }
- if (buffer->ByteLength() % 4 == 0) {
- addTypedArrayView<v8::Int32Array>(context, buffer, length / 4,
- "[[Int32Array]]", accumulator);
- }
+
+ length = byte_length / 2;
+ if (length > v8::TypedArray::kMaxLength || (byte_length % 2) != 0) return;
+
+ addTypedArrayView<v8::Int16Array>(context, buffer, length, "[[Int16Array]]",
+ accumulator);
+
+ length = byte_length / 4;
+ if (length > v8::TypedArray::kMaxLength || (byte_length % 4) != 0) return;
+
+ addTypedArrayView<v8::Int32Array>(context, buffer, length, "[[Int32Array]]",
+ accumulator);
}
} // anonymous namespace
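The rewritten addTypedArrayViews computes each view's element count from the buffer's byte length and stops as soon as the byte length no longer divides evenly by the element size or the count would exceed v8::TypedArray::kMaxLength. A standalone sketch of those checks expressed as a per-view predicate, with a hypothetical kMaxElements cap standing in for the real constant:

#include <cstddef>
#include <cstdio>

// Hypothetical cap standing in for v8::TypedArray::kMaxLength.
constexpr std::size_t kMaxElements = 1u << 28;

// Returns true if a typed-array view with |element_size|-byte elements may be
// previewed over a buffer of |byte_length| bytes: the length must divide
// evenly and the resulting element count must stay under the cap.
bool CanPreviewView(std::size_t byte_length, std::size_t element_size,
                    std::size_t* out_length) {
  if (byte_length % element_size != 0) return false;
  std::size_t length = byte_length / element_size;
  if (length > kMaxElements) return false;
  *out_length = length;
  return true;
}

int main() {
  std::size_t len = 0;
  if (CanPreviewView(10, 2, &len)) std::printf("Int16 view: %zu elements\n", len);
  if (!CanPreviewView(10, 4, &len)) std::printf("no Int32 view: 10 %% 4 != 0\n");
}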
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index e985bda102..254e6e60d1 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
bmeurer@chromium.org
leszeks@chromium.org
mstarzinger@chromium.org
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index d7232fcd4c..d460c1a45f 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -14,15 +14,61 @@ namespace v8 {
namespace internal {
namespace interpreter {
+namespace {
+
+class OnHeapBytecodeArray final : public AbstractBytecodeArray {
+ public:
+ explicit OnHeapBytecodeArray(Handle<BytecodeArray> bytecode_array)
+ : array_(bytecode_array) {}
+
+ int length() const override { return array_->length(); }
+
+ int parameter_count() const override { return array_->parameter_count(); }
+
+ uint8_t get(int index) const override { return array_->get(index); }
+
+ void set(int index, uint8_t value) override {
+ return array_->set(index, value);
+ }
+
+ Address GetFirstBytecodeAddress() const override {
+ return array_->GetFirstBytecodeAddress();
+ }
+
+ Handle<Object> GetConstantAtIndex(int index,
+ Isolate* isolate) const override {
+ return handle(array_->constant_pool().get(index), isolate);
+ }
+
+ bool IsConstantAtIndexSmi(int index) const override {
+ return array_->constant_pool().get(index).IsSmi();
+ }
+
+ Smi GetConstantAtIndexAsSmi(int index) const override {
+ return Smi::cast(array_->constant_pool().get(index));
+ }
+
+ private:
+ Handle<BytecodeArray> array_;
+};
+
+} // namespace
+
BytecodeArrayAccessor::BytecodeArrayAccessor(
- Handle<BytecodeArray> bytecode_array, int initial_offset)
- : bytecode_array_(bytecode_array),
+ std::unique_ptr<AbstractBytecodeArray> bytecode_array, int initial_offset)
+ : bytecode_array_(std::move(bytecode_array)),
bytecode_offset_(initial_offset),
operand_scale_(OperandScale::kSingle),
prefix_offset_(0) {
UpdateOperandScale();
}
+BytecodeArrayAccessor::BytecodeArrayAccessor(
+ Handle<BytecodeArray> bytecode_array, int initial_offset)
+ : BytecodeArrayAccessor(
+ base::make_unique<OnHeapBytecodeArray>(bytecode_array),
+ initial_offset) {}
+
void BytecodeArrayAccessor::SetOffset(int offset) {
bytecode_offset_ = offset;
UpdateOperandScale();
@@ -33,12 +79,12 @@ void BytecodeArrayAccessor::ApplyDebugBreak() {
// scaling prefix, which we can patch with the matching debug-break
// variant.
interpreter::Bytecode bytecode =
- interpreter::Bytecodes::FromByte(bytecode_array_->get(bytecode_offset_));
+ interpreter::Bytecodes::FromByte(bytecode_array()->get(bytecode_offset_));
if (interpreter::Bytecodes::IsDebugBreak(bytecode)) return;
interpreter::Bytecode debugbreak =
interpreter::Bytecodes::GetDebugBreak(bytecode);
- bytecode_array_->set(bytecode_offset_,
- interpreter::Bytecodes::ToByte(debugbreak));
+ bytecode_array()->set(bytecode_offset_,
+ interpreter::Bytecodes::ToByte(debugbreak));
}
void BytecodeArrayAccessor::UpdateOperandScale() {
@@ -197,13 +243,22 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
}
-Object BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
- return bytecode_array()->constant_pool().get(index);
+Handle<Object> BytecodeArrayAccessor::GetConstantAtIndex(
+ int index, Isolate* isolate) const {
+ return bytecode_array()->GetConstantAtIndex(index, isolate);
}
-Object BytecodeArrayAccessor::GetConstantForIndexOperand(
- int operand_index) const {
- return GetConstantAtIndex(GetIndexOperand(operand_index));
+bool BytecodeArrayAccessor::IsConstantAtIndexSmi(int index) const {
+ return bytecode_array()->IsConstantAtIndexSmi(index);
+}
+
+Smi BytecodeArrayAccessor::GetConstantAtIndexAsSmi(int index) const {
+ return bytecode_array()->GetConstantAtIndexAsSmi(index);
+}
+
+Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
+ int operand_index, Isolate* isolate) const {
+ return GetConstantAtIndex(GetIndexOperand(operand_index), isolate);
}
int BytecodeArrayAccessor::GetJumpTargetOffset() const {
@@ -215,7 +270,7 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
}
return GetAbsoluteOffset(relative_offset);
} else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
- Smi smi = Smi::cast(GetConstantForIndexOperand(0));
+ Smi smi = GetConstantAtIndexAsSmi(GetIndexOperand(0));
return GetAbsoluteOffset(smi.value());
} else {
UNREACHABLE();
@@ -315,19 +370,16 @@ bool JumpTableTargetOffsets::iterator::operator!=(
}
void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
- if (table_offset_ >= table_end_) return;
-
- Object current = accessor_->GetConstantAtIndex(table_offset_);
- while (!current.IsSmi()) {
- DCHECK(current.IsTheHole());
+ while (table_offset_ < table_end_ &&
+ !accessor_->IsConstantAtIndexSmi(table_offset_)) {
++table_offset_;
++index_;
- if (table_offset_ >= table_end_) break;
- current = accessor_->GetConstantAtIndex(table_offset_);
}
+
// Make sure we haven't reached the end of the table with a hole in current.
- if (current.IsSmi()) {
- current_ = Smi::cast(current);
+ if (table_offset_ < table_end_) {
+ DCHECK(accessor_->IsConstantAtIndexSmi(table_offset_));
+ current_ = accessor_->GetConstantAtIndexAsSmi(table_offset_);
}
}
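This hunk decouples the accessor from on-heap BytecodeArray handles by introducing the AbstractBytecodeArray interface and an OnHeapBytecodeArray adapter, so the same iteration code can also run over other bytecode storage. A minimal sketch of that adapter shape, using simplified invented types rather than the real V8 handles:

#include <cstdint>
#include <memory>
#include <vector>

// Simplified stand-in for the abstract interface: only length() and get().
class AbstractByteArray {
 public:
  virtual ~AbstractByteArray() = default;
  virtual int length() const = 0;
  virtual uint8_t get(int index) const = 0;
};

// Adapter over a concrete container, analogous to OnHeapBytecodeArray
// wrapping a Handle<BytecodeArray>.
class VectorByteArray final : public AbstractByteArray {
 public:
  explicit VectorByteArray(std::vector<uint8_t> bytes) : bytes_(std::move(bytes)) {}
  int length() const override { return static_cast<int>(bytes_.size()); }
  uint8_t get(int index) const override { return bytes_[index]; }

 private:
  std::vector<uint8_t> bytes_;
};

// The accessor owns the abstraction and no longer cares where the bytes live.
class Accessor {
 public:
  explicit Accessor(std::unique_ptr<AbstractByteArray> array)
      : array_(std::move(array)) {}
  int Sum() const {
    int sum = 0;
    for (int i = 0; i < array_->length(); ++i) sum += array_->get(i);
    return sum;
  }

 private:
  std::unique_ptr<AbstractByteArray> array_;
};

int main() {
  Accessor accessor(std::make_unique<VectorByteArray>(std::vector<uint8_t>{1, 2, 3}));
  return accessor.Sum() == 6 ? 0 : 1;
}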
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index 91b6886204..97278af7bd 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -5,6 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
+#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/interpreter/bytecode-register.h"
@@ -64,8 +65,27 @@ class V8_EXPORT_PRIVATE JumpTableTargetOffsets final {
int case_value_base_;
};
+class V8_EXPORT_PRIVATE AbstractBytecodeArray {
+ public:
+ virtual int length() const = 0;
+ virtual int parameter_count() const = 0;
+ virtual uint8_t get(int index) const = 0;
+ virtual void set(int index, uint8_t value) = 0;
+ virtual Address GetFirstBytecodeAddress() const = 0;
+
+ virtual Handle<Object> GetConstantAtIndex(int index,
+ Isolate* isolate) const = 0;
+ virtual bool IsConstantAtIndexSmi(int index) const = 0;
+ virtual Smi GetConstantAtIndexAsSmi(int index) const = 0;
+
+ virtual ~AbstractBytecodeArray() = default;
+};
+
class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
public:
+ BytecodeArrayAccessor(std::unique_ptr<AbstractBytecodeArray> bytecode_array,
+ int initial_offset);
+
BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
int initial_offset);
@@ -78,8 +98,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
int current_offset() const { return bytecode_offset_; }
OperandScale current_operand_scale() const { return operand_scale_; }
int current_prefix_offset() const { return prefix_offset_; }
- const Handle<BytecodeArray>& bytecode_array() const {
- return bytecode_array_;
+ AbstractBytecodeArray* bytecode_array() const {
+ return bytecode_array_.get();
}
uint32_t GetFlagOperand(int operand_index) const;
@@ -93,8 +113,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
uint32_t GetNativeContextIndexOperand(int operand_index) const;
- Object GetConstantAtIndex(int offset) const;
- Object GetConstantForIndexOperand(int operand_index) const;
+ Handle<Object> GetConstantAtIndex(int offset, Isolate* isolate) const;
+ bool IsConstantAtIndexSmi(int offset) const;
+ Smi GetConstantAtIndexAsSmi(int offset) const;
+ Handle<Object> GetConstantForIndexOperand(int operand_index,
+ Isolate* isolate) const;
// Returns the absolute offset of the branch target at the current bytecode.
// It is an error to call this method if the bytecode is not for a jump or
@@ -122,7 +145,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
void UpdateOperandScale();
- Handle<BytecodeArray> bytecode_array_;
+ std::unique_ptr<AbstractBytecodeArray> bytecode_array_;
int bytecode_offset_;
OperandScale operand_scale_;
int prefix_offset_;
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index b582311007..0fc57f85b8 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -11,6 +11,10 @@ namespace internal {
namespace interpreter {
BytecodeArrayIterator::BytecodeArrayIterator(
+ std::unique_ptr<AbstractBytecodeArray> bytecode_array)
+ : BytecodeArrayAccessor(std::move(bytecode_array), 0) {}
+
+BytecodeArrayIterator::BytecodeArrayIterator(
Handle<BytecodeArray> bytecode_array)
: BytecodeArrayAccessor(bytecode_array, 0) {}
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 7ec9d1288c..e6b58deadc 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -14,7 +14,9 @@ namespace interpreter {
class V8_EXPORT_PRIVATE BytecodeArrayIterator final
: public BytecodeArrayAccessor {
public:
- explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array);
+ explicit BytecodeArrayIterator(std::unique_ptr<AbstractBytecodeArray> array);
+
+ explicit BytecodeArrayIterator(Handle<BytecodeArray> array);
void Advance();
bool done() const;
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
index 4ed5ce5e7d..9362232899 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
@@ -11,11 +11,21 @@ namespace internal {
namespace interpreter {
BytecodeArrayRandomIterator::BytecodeArrayRandomIterator(
+ std::unique_ptr<AbstractBytecodeArray> bytecode_array, Zone* zone)
+ : BytecodeArrayAccessor(std::move(bytecode_array), 0), offsets_(zone) {
+ Initialize();
+}
+
+BytecodeArrayRandomIterator::BytecodeArrayRandomIterator(
Handle<BytecodeArray> bytecode_array, Zone* zone)
: BytecodeArrayAccessor(bytecode_array, 0), offsets_(zone) {
+ Initialize();
+}
+
+void BytecodeArrayRandomIterator::Initialize() {
// Run forwards through the bytecode array to determine the offset of each
// bytecode.
- while (current_offset() < bytecode_array->length()) {
+ while (current_offset() < bytecode_array()->length()) {
offsets_.push_back(current_offset());
SetOffset(current_offset() + current_bytecode_size());
}
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.h b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
index 7d559ea176..a3b69b7015 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
@@ -16,8 +16,10 @@ namespace interpreter {
class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
: public BytecodeArrayAccessor {
public:
- explicit BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array,
- Zone* zone);
+ BytecodeArrayRandomIterator(
+ std::unique_ptr<AbstractBytecodeArray> bytecode_array, Zone* zone);
+
+ BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array, Zone* zone);
BytecodeArrayRandomIterator& operator++() {
++current_index_;
@@ -66,6 +68,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
ZoneVector<int> offsets_;
int current_index_;
+ void Initialize();
void UpdateOffsetFromIndex();
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayRandomIterator);
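Both random-iterator constructors now funnel the shared offset scan into a private Initialize() method, which also makes the scan read through the bytecode_array() accessor rather than a constructor parameter. A small sketch of that shape, constructor delegation plus a shared Initialize(), with invented types:

#include <cstddef>
#include <memory>
#include <vector>

struct Source { std::vector<int> items; };

class RandomIterator {
 public:
  // Primary constructor: takes ownership, then runs the shared setup.
  explicit RandomIterator(std::unique_ptr<Source> source)
      : source_(std::move(source)) {
    Initialize();
  }

  // Convenience overload: wraps a copy and delegates to the primary one.
  explicit RandomIterator(const Source& source)
      : RandomIterator(std::make_unique<Source>(source)) {}

  std::size_t size() const { return offsets_.size(); }

 private:
  // Shared setup used by every constructor; it reads through the member,
  // not a constructor argument, so all overloads behave identically.
  void Initialize() {
    for (std::size_t i = 0; i < source_->items.size(); ++i) offsets_.push_back(i);
  }

  std::unique_ptr<Source> source_;
  std::vector<std::size_t> offsets_;
};

int main() {
  Source source{{10, 20, 30}};
  RandomIterator it(source);
  return it.size() == 3 ? 0 : 1;
}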
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 3769eefda1..3ecc5e1a89 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -334,8 +334,8 @@ void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
// The jump fits within the range of an Imm16 operand, so cancel
// the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
- WriteUnalignedUInt16(reinterpret_cast<Address>(operand_bytes),
- static_cast<uint16_t>(delta));
+ base::WriteUnalignedValue<uint16_t>(
+ reinterpret_cast<Address>(operand_bytes), static_cast<uint16_t>(delta));
} else {
// The jump does not fit within the range of an Imm16 operand, so
// commit reservation putting the offset into the constant pool,
@@ -344,8 +344,8 @@ void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
OperandSize::kShort, Smi::FromInt(delta));
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
- WriteUnalignedUInt16(reinterpret_cast<Address>(operand_bytes),
- static_cast<uint16_t>(entry));
+ base::WriteUnalignedValue<uint16_t>(
+ reinterpret_cast<Address>(operand_bytes), static_cast<uint16_t>(entry));
}
DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder);
@@ -359,8 +359,8 @@ void BytecodeArrayWriter::PatchJumpWith32BitOperand(size_t jump_location,
Bytecodes::FromByte(bytecodes()->at(jump_location))));
constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
uint8_t operand_bytes[4];
- WriteUnalignedUInt32(reinterpret_cast<Address>(operand_bytes),
- static_cast<uint32_t>(delta));
+ base::WriteUnalignedValue<uint32_t>(reinterpret_cast<Address>(operand_bytes),
+ static_cast<uint32_t>(delta));
size_t operand_location = jump_location + 1;
DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder &&
diff --git a/deps/v8/src/interpreter/bytecode-decoder.cc b/deps/v8/src/interpreter/bytecode-decoder.cc
index 6f2f9dda0d..3a297b1ddf 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.cc
+++ b/deps/v8/src/interpreter/bytecode-decoder.cc
@@ -42,9 +42,11 @@ int32_t BytecodeDecoder::DecodeSignedOperand(Address operand_start,
case OperandSize::kByte:
return *reinterpret_cast<const int8_t*>(operand_start);
case OperandSize::kShort:
- return static_cast<int16_t>(ReadUnalignedUInt16(operand_start));
+ return static_cast<int16_t>(
+ base::ReadUnalignedValue<uint16_t>(operand_start));
case OperandSize::kQuad:
- return static_cast<int32_t>(ReadUnalignedUInt32(operand_start));
+ return static_cast<int32_t>(
+ base::ReadUnalignedValue<uint32_t>(operand_start));
case OperandSize::kNone:
UNREACHABLE();
}
@@ -60,9 +62,9 @@ uint32_t BytecodeDecoder::DecodeUnsignedOperand(Address operand_start,
case OperandSize::kByte:
return *reinterpret_cast<const uint8_t*>(operand_start);
case OperandSize::kShort:
- return ReadUnalignedUInt16(operand_start);
+ return base::ReadUnalignedValue<uint16_t>(operand_start);
case OperandSize::kQuad:
- return ReadUnalignedUInt32(operand_start);
+ return base::ReadUnalignedValue<uint32_t>(operand_start);
case OperandSize::kNone:
UNREACHABLE();
}
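The writer and decoder hunks above replace the old WriteUnalignedUInt16/ReadUnalignedUInt32 helpers with the templated base::WriteUnalignedValue/ReadUnalignedValue, which are in essence memcpy wrappers that avoid dereferencing a misaligned pointer. A self-contained sketch of such helpers, assuming plain C++ pointers rather than the V8 base library's Address type:

#include <cstdint>
#include <cstdio>
#include <cstring>

// memcpy-based unaligned accessors: compilers lower these to single
// loads/stores on architectures that permit unaligned access.
template <typename T>
T ReadUnalignedValue(const void* p) {
  T value;
  std::memcpy(&value, p, sizeof(T));
  return value;
}

template <typename T>
void WriteUnalignedValue(void* p, T value) {
  std::memcpy(p, &value, sizeof(T));
}

int main() {
  uint8_t operand_bytes[5] = {0};
  WriteUnalignedValue<uint32_t>(operand_bytes + 1, 0xDEADBEEF);  // misaligned target
  std::printf("0x%08X\n", ReadUnalignedValue<uint32_t>(operand_bytes + 1));
}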
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 706580ac14..d3b27b4375 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -915,6 +915,45 @@ class BytecodeGenerator::IteratorRecord final {
Register next_;
};
+namespace {
+
+// A map from property names to getter/setter pairs allocated in the zone that
+// also provides a way of accessing the pairs in the order they were first
+// added so that the generated bytecode is always the same.
+class AccessorTable
+ : public base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ bool (*)(void*, void*),
+ ZoneAllocationPolicy> {
+ public:
+ explicit AccessorTable(Zone* zone)
+ : base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ bool (*)(void*, void*), ZoneAllocationPolicy>(
+ Literal::Match, ZoneAllocationPolicy(zone)),
+ zone_(zone) {}
+
+ Iterator lookup(Literal* literal) {
+ Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
+ if (it->second == nullptr) {
+ it->second = new (zone_) ObjectLiteral::Accessors();
+ ordered_accessors_.push_back({literal, it->second});
+ }
+ return it;
+ }
+
+ const std::vector<std::pair<Literal*, ObjectLiteral::Accessors*>>&
+ ordered_accessors() {
+ return ordered_accessors_;
+ }
+
+ private:
+ std::vector<std::pair<Literal*, ObjectLiteral::Accessors*>>
+ ordered_accessors_;
+
+ Zone* zone_;
+};
+
+} // namespace
+
#ifdef DEBUG
static bool IsInEagerLiterals(
@@ -1354,7 +1393,8 @@ void BytecodeGenerator::VisitModuleNamespaceImports() {
RegisterAllocationScope register_scope(this);
Register module_request = register_allocator()->NewRegister();
- ModuleDescriptor* descriptor = closure_scope()->AsModuleScope()->module();
+ SourceTextModuleDescriptor* descriptor =
+ closure_scope()->AsModuleScope()->module();
for (auto entry : descriptor->namespace_imports()) {
builder()
->LoadLiteral(Smi::FromInt(entry->module_request))
@@ -2201,6 +2241,19 @@ void BytecodeGenerator::VisitInitializeClassMembersStatement(
}
}
+void BytecodeGenerator::BuildThrowPrivateMethodWriteError(
+ const AstRawString* name) {
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(Smi::FromEnum(MessageTemplate::kInvalidPrivateMethodWrite))
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(name)
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kNewTypeError, args)
+ .Throw();
+}
+
void BytecodeGenerator::BuildPrivateBrandInitialization(Register receiver) {
RegisterList brand_args = register_allocator()->NewRegisterList(2);
Variable* brand = info()->scope()->outer_scope()->AsClassScope()->brand();
@@ -2366,13 +2419,6 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
RegisterAllocationScope register_scope(this);
Expression* property = expr->properties()->first()->value();
Register from_value = VisitForRegisterValue(property);
-
- BytecodeLabels clone_object(zone());
- builder()->JumpIfUndefined(clone_object.New());
- builder()->JumpIfNull(clone_object.New());
- builder()->ToObject(from_value);
-
- clone_object.Bind(builder());
int clone_index = feedback_index(feedback_spec()->AddCloneObjectSlot());
builder()->CloneObject(from_value, flags, clone_index);
builder()->StoreAccumulatorInRegister(literal);
@@ -2473,14 +2519,13 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Define accessors, using only a single call to the runtime for each pair of
// corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end(); ++it) {
+ for (auto accessors : accessor_table.ordered_accessors()) {
RegisterAllocationScope inner_register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(5);
builder()->MoveRegister(literal, args[0]);
- VisitForRegisterValue(it->first, args[1]);
- VisitObjectLiteralAccessor(literal, it->second->getter, args[2]);
- VisitObjectLiteralAccessor(literal, it->second->setter, args[3]);
+ VisitForRegisterValue(accessors.first, args[1]);
+ VisitObjectLiteralAccessor(literal, accessors.second->getter, args[2]);
+ VisitObjectLiteralAccessor(literal, accessors.second->setter, args[3]);
builder()
->LoadLiteral(Smi::FromInt(NONE))
.StoreAccumulatorInRegister(args[4])
@@ -3156,6 +3201,13 @@ BytecodeGenerator::AssignmentLhsData::NamedSuperProperty(
}
// static
BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::PrivateMethod(Register object,
+ const AstRawString* name) {
+ return AssignmentLhsData(PRIVATE_METHOD, nullptr, RegisterList(), object,
+ Register(), nullptr, name);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
BytecodeGenerator::AssignmentLhsData::KeyedSuperProperty(
RegisterList super_property_args) {
return AssignmentLhsData(KEYED_SUPER_PROPERTY, nullptr, super_property_args,
@@ -3185,6 +3237,13 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
Register key = VisitForRegisterValue(property->key());
return AssignmentLhsData::KeyedProperty(object, key);
}
+ case PRIVATE_METHOD: {
+ DCHECK(!property->IsSuperAccess());
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ Register object = VisitForRegisterValue(property->obj());
+ const AstRawString* name = property->key()->AsVariableProxy()->raw_name();
+ return AssignmentLhsData::PrivateMethod(object, name);
+ }
case NAMED_SUPER_PROPERTY: {
AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
RegisterList super_property_args =
@@ -3219,15 +3278,16 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
// Build the iteration finalizer called in the finally block of an iteration
// protocol execution. This closes the iterator if needed, and suppresses any
-// exception it throws if necessary.
+// exception it throws if necessary, including the exception when the return
+// method is not callable.
//
// In pseudo-code, this builds:
//
// if (!done) {
// let method = iterator.return
// if (method !== null && method !== undefined) {
-// if (typeof(method) !== "function") throw TypeError
// try {
+// if (typeof(method) !== "function") throw TypeError
// let return_val = method.call(iterator)
// if (!%IsObject(return_val)) throw TypeError
// } catch (e) {
@@ -3259,33 +3319,35 @@ void BytecodeGenerator::BuildFinalizeIteration(
.JumpIfUndefined(iterator_is_done.New())
.JumpIfNull(iterator_is_done.New());
- // if (typeof(method) !== "function") throw TypeError
- BytecodeLabel if_callable;
- builder()
- ->CompareTypeOf(TestTypeOfFlags::LiteralFlag::kFunction)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &if_callable);
- {
- // throw %NewTypeError(kReturnMethodNotCallable)
- RegisterAllocationScope register_scope(this);
- RegisterList new_type_error_args = register_allocator()->NewRegisterList(2);
- builder()
- ->LoadLiteral(Smi::FromEnum(MessageTemplate::kReturnMethodNotCallable))
- .StoreAccumulatorInRegister(new_type_error_args[0])
- .LoadLiteral(ast_string_constants()->empty_string())
- .StoreAccumulatorInRegister(new_type_error_args[1])
- .CallRuntime(Runtime::kNewTypeError, new_type_error_args)
- .Throw();
- }
- builder()->Bind(&if_callable);
-
{
RegisterAllocationScope register_scope(this);
BuildTryCatch(
// try {
+ // if (typeof(method) !== "function") throw TypeError
// let return_val = method.call(iterator)
// if (!%IsObject(return_val)) throw TypeError
// }
[&]() {
+ BytecodeLabel if_callable;
+ builder()
+ ->CompareTypeOf(TestTypeOfFlags::LiteralFlag::kFunction)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &if_callable);
+ {
+ // throw %NewTypeError(kReturnMethodNotCallable)
+ RegisterAllocationScope register_scope(this);
+ RegisterList new_type_error_args =
+ register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(
+ Smi::FromEnum(MessageTemplate::kReturnMethodNotCallable))
+ .StoreAccumulatorInRegister(new_type_error_args[0])
+ .LoadLiteral(ast_string_constants()->empty_string())
+ .StoreAccumulatorInRegister(new_type_error_args[1])
+ .CallRuntime(Runtime::kNewTypeError, new_type_error_args)
+ .Throw();
+ }
+ builder()->Bind(&if_callable);
+
RegisterList args(iterator.object());
builder()->CallProperty(
method, args, feedback_index(feedback_spec()->AddCallICSlot()));
@@ -3736,6 +3798,10 @@ void BytecodeGenerator::BuildAssignment(
lhs_data.super_property_args());
break;
}
+ case PRIVATE_METHOD: {
+ BuildThrowPrivateMethodWriteError(lhs_data.name());
+ break;
+ }
}
}
@@ -3781,6 +3847,10 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
lhs_data.super_property_args().Truncate(3));
break;
}
+ case PRIVATE_METHOD: {
+ BuildThrowPrivateMethodWriteError(lhs_data.name());
+ break;
+ }
}
BinaryOperation* binop = expr->AsCompoundAssignment()->binary_operation();
FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
@@ -4238,6 +4308,23 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
case KEYED_SUPER_PROPERTY:
VisitKeyedSuperPropertyLoad(property, Register::invalid_value());
break;
+ case PRIVATE_METHOD: {
+ Variable* private_name = property->key()->AsVariableProxy()->var();
+
+ // Perform the brand check.
+ DCHECK(private_name->requires_brand_check());
+ ClassScope* scope = private_name->scope()->AsClassScope();
+ Variable* brand = scope->brand();
+ BuildVariableLoadForAccumulatorValue(brand, HoleCheckMode::kElided);
+ builder()->SetExpressionPosition(property);
+ builder()->LoadKeyedProperty(
+ obj, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
+
+ // In the case of private methods, property->key() is the function to be
+ // loaded (stored in a context slot), so load this directly.
+ VisitForAccumulatorValue(property->key());
+ break;
+ }
}
}
@@ -4342,7 +4429,8 @@ void BytecodeGenerator::VisitCall(Call* expr) {
// the semantics of the underlying call type.
switch (call_type) {
case Call::NAMED_PROPERTY_CALL:
- case Call::KEYED_PROPERTY_CALL: {
+ case Call::KEYED_PROPERTY_CALL:
+ case Call::PRIVATE_CALL: {
Property* property = callee_expr->AsProperty();
VisitAndPushIntoRegisterList(property->obj(), &args);
VisitPropertyLoadForRegister(args.last_register(), property, callee);
@@ -4678,6 +4766,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* unary) {
// Delete of an object property is allowed both in sloppy
// and strict modes.
Property* property = expr->AsProperty();
+ DCHECK(!property->IsPrivateReference());
Register object = VisitForRegisterValue(property->obj());
VisitForAccumulatorValue(property->key());
builder()->Delete(object, language_mode());
@@ -4785,6 +4874,11 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, load_super_args);
break;
}
+ case PRIVATE_METHOD: {
+ BuildThrowPrivateMethodWriteError(
+ property->key()->AsVariableProxy()->raw_name());
+ break;
+ }
}
// Save result for postfix expressions.
@@ -4851,6 +4945,11 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
.CallRuntime(Runtime::kStoreKeyedToSuper, super_property_args);
break;
}
+ case PRIVATE_METHOD: {
+ BuildThrowPrivateMethodWriteError(
+ property->key()->AsVariableProxy()->raw_name());
+ break;
+ }
}
// Restore old value for postfix expressions.
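The new AccessorTable in this file wraps the zone hash map and additionally records each getter/setter pair in the order it was first inserted, so iterating ordered_accessors() emits deterministic bytecode regardless of hash order. A standalone sketch of an insertion-ordered lookup table built on standard containers instead of the zone allocator:

#include <cstdio>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Map from name to a value slot that also remembers first-insertion order,
// so iteration is deterministic even though the hash map itself is not.
// (unordered_map guarantees element pointers stay valid across insertions.)
class OrderedTable {
 public:
  int* Lookup(const std::string& key) {
    auto it = map_.find(key);
    if (it == map_.end()) {
      it = map_.emplace(key, 0).first;
      ordered_.push_back(&*it);  // remember the pair in insertion order
    }
    return &it->second;
  }

  const std::vector<std::pair<const std::string, int>*>& ordered() const {
    return ordered_;
  }

 private:
  std::unordered_map<std::string, int> map_;
  std::vector<std::pair<const std::string, int>*> ordered_;
};

int main() {
  OrderedTable table;
  *table.Lookup("setter_b") = 2;
  *table.Lookup("getter_a") = 1;
  *table.Lookup("setter_b") = 3;  // existing entry, order unchanged
  for (const auto* entry : table.ordered())
    std::printf("%s = %d\n", entry->first.c_str(), entry->second);
}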
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index dda8b15c80..b754d2c296 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -84,6 +84,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Register object,
const AstRawString* name);
static AssignmentLhsData KeyedProperty(Register object, Register key);
+ static AssignmentLhsData PrivateMethod(Register object,
+ const AstRawString* name);
static AssignmentLhsData NamedSuperProperty(
RegisterList super_property_args);
static AssignmentLhsData KeyedSuperProperty(
@@ -99,15 +101,16 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
return object_expr_;
}
Register object() const {
- DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY);
+ DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY ||
+ assign_type_ == PRIVATE_METHOD);
return object_;
}
Register key() const {
- DCHECK_EQ(assign_type_, KEYED_PROPERTY);
+ DCHECK(assign_type_ == KEYED_PROPERTY);
return key_;
}
const AstRawString* name() const {
- DCHECK_EQ(assign_type_, NAMED_PROPERTY);
+ DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == PRIVATE_METHOD);
return name_;
}
RegisterList super_property_args() const {
@@ -135,7 +138,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
//
// NON_PROPERTY: expr
// NAMED_PROPERTY: object_expr, object, name
- // KEYED_PROPERTY: object, key
+ // KEYED_PROPERTY, PRIVATE_METHOD: object, key
// NAMED_SUPER_PROPERTY: super_property_args
// KEYED_SUPER_PROPERT: super_property_args
Expression* expr_;
@@ -238,8 +241,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Build jump to targets[value], where
// start_index <= value < start_index + size.
- void BuildIndexedJump(Register value, size_t start_index, size_t size,
- ZoneVector<BytecodeLabel>& targets);
+ void BuildIndexedJump(
+ Register value, size_t start_index, size_t size,
+ ZoneVector<BytecodeLabel>& targets); // NOLINT(runtime/references)
void BuildNewLocalActivationContext();
void BuildLocalActivationContextInitialization();
@@ -291,6 +295,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
+ void BuildThrowPrivateMethodWriteError(const AstRawString* name);
void BuildPrivateClassMemberNameAssignment(ClassLiteral::Property* property);
void BuildClassLiteral(ClassLiteral* expr, Register name);
void VisitClassLiteral(ClassLiteral* expr, Register name);
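AssignmentLhsData gains a PrivateMethod() factory, and its accessors now assert which assign_type_ values may read each field (object() also for PRIVATE_METHOD, name() for NAMED_PROPERTY or PRIVATE_METHOD). A compact sketch of that shape, one value type, per-kind static factories, assertion-guarded accessors, using invented kinds and plain assert() instead of DCHECK:

#include <cassert>
#include <cstdio>
#include <string>

// Each static factory fills only the fields meaningful for its kind, and the
// accessors assert that callers read only fields valid for the stored kind.
class LhsData {
 public:
  enum Kind { kNamed, kKeyed, kPrivate };

  static LhsData Named(int object, std::string name) {
    return LhsData(kNamed, object, -1, std::move(name));
  }
  static LhsData Keyed(int object, int key) {
    return LhsData(kKeyed, object, key, std::string());
  }
  static LhsData Private(int object, std::string name) {
    return LhsData(kPrivate, object, -1, std::move(name));
  }

  int object() const { return object_; }
  int key() const { assert(kind_ == kKeyed); return key_; }
  const std::string& name() const {
    assert(kind_ == kNamed || kind_ == kPrivate);
    return name_;
  }

 private:
  LhsData(Kind kind, int object, int key, std::string name)
      : kind_(kind), object_(object), key_(key), name_(std::move(name)) {}

  Kind kind_;
  int object_;
  int key_;
  std::string name_;
};

int main() {
  LhsData lhs = LhsData::Private(/*object register*/ 3, "#method");
  std::printf("private member %s on r%d\n", lhs.name().c_str(), lhs.object());
}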
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index db7ed750dd..66b8d1f937 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -5,9 +5,10 @@
#ifndef V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
#define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
-#include "src/execution/frames.h"
+#include "src/codegen/handler-table.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
+#include "src/objects/fixed-array.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 0af58b674f..7291ea1c35 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -1265,7 +1265,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
// Make sure we include the current bytecode in the budget calculation.
TNode<Int32T> budget_after_bytecode =
- Signed(Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize())));
+ Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));
Label done(this);
TVARIABLE(Int32T, new_budget);
@@ -1501,9 +1501,9 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
UpdateInterruptBudget(profiling_weight, true);
}
-Node* InterpreterAssembler::LoadOSRNestingLevel() {
+Node* InterpreterAssembler::LoadOsrNestingLevel() {
return LoadObjectField(BytecodeArrayTaggedPointer(),
- BytecodeArray::kOSRNestingLevelOffset,
+ BytecodeArray::kOsrNestingLevelOffset,
MachineType::Int8());
}
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index db4523b744..a135eaacdd 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -237,7 +237,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void UpdateInterruptBudgetOnReturn();
// Returns the OSR nesting level from the bytecode header.
- compiler::Node* LoadOSRNestingLevel();
+ compiler::Node* LoadOsrNestingLevel();
// Dispatch to the bytecode.
compiler::Node* Dispatch();
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 852aae4482..00ce8eaf68 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -21,9 +21,9 @@
#include "src/interpreter/interpreter-intrinsics-generator.h"
#include "src/objects/cell.h"
#include "src/objects/js-generator.h"
-#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
+#include "src/objects/source-text-module.h"
#include "src/utils/ostreams.h"
namespace v8 {
@@ -512,17 +512,18 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
// Load receiver.
Node* recv = LoadRegisterAtOperandIndex(0);
- // Load the name.
- // TODO(jgruber): Not needed for monomorphic smi handler constant/field case.
- Node* name = LoadConstantPoolEntryAtOperandIndex(1);
- Node* context = GetContext();
+ // Load the name and context lazily.
+ LazyNode<Name> name = [=] {
+ return CAST(LoadConstantPoolEntryAtOperandIndex(1));
+ };
+ LazyNode<Context> context = [=] { return CAST(GetContext()); };
Label done(this);
Variable var_result(this, MachineRepresentation::kTagged);
ExitPoint exit_point(this, &done, &var_result);
- AccessorAssembler::LoadICParameters params(context, recv, name, smi_slot,
- feedback_vector);
+ AccessorAssembler::LazyLoadICParameters params(context, recv, name, smi_slot,
+ feedback_vector);
AccessorAssembler accessor_asm(state());
accessor_asm.LoadIC_BytecodeHandler(&params, &exit_point);
@@ -735,7 +736,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
BIND(&if_export);
{
TNode<FixedArray> regular_exports =
- CAST(LoadObjectField(module, Module::kRegularExportsOffset));
+ CAST(LoadObjectField(module, SourceTextModule::kRegularExportsOffset));
// The actual array index is (cell_index - 1).
Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1));
Node* cell = LoadFixedArrayElement(regular_exports, export_index);
@@ -746,7 +747,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
BIND(&if_import);
{
TNode<FixedArray> regular_imports =
- CAST(LoadObjectField(module, Module::kRegularImportsOffset));
+ CAST(LoadObjectField(module, SourceTextModule::kRegularImportsOffset));
// The actual array index is (-cell_index - 1).
Node* import_index = IntPtrSub(IntPtrConstant(-1), cell_index);
Node* cell = LoadFixedArrayElement(regular_imports, import_index);
@@ -777,7 +778,7 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
BIND(&if_export);
{
TNode<FixedArray> regular_exports =
- CAST(LoadObjectField(module, Module::kRegularExportsOffset));
+ CAST(LoadObjectField(module, SourceTextModule::kRegularExportsOffset));
// The actual array index is (cell_index - 1).
Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1));
Node* cell = LoadFixedArrayElement(regular_exports, export_index);
@@ -2336,7 +2337,7 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
Node* relative_jump = BytecodeOperandUImmWord(0);
Node* loop_depth = BytecodeOperandImm(1);
- Node* osr_level = LoadOSRNestingLevel();
+ Node* osr_level = LoadOsrNestingLevel();
// Check if OSR points at the given {loop_depth} are armed by comparing it to
// the current {osr_level} loaded from the header of the BytecodeArray.
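In LdaNamedProperty above, the property name and context are now passed to the load IC as LazyNode callbacks, so they are only materialized on the slow path that actually needs them. A sketch of the same deferral idea using plain lambdas, independent of CodeStubAssembler and with invented names:

#include <cstdio>
#include <functional>
#include <string>

// A "lazy node": a callable that produces the value only when invoked.
using LazyString = std::function<std::string()>;

// The fast path ignores the lazy argument entirely; only the slow path pays
// the cost of building the value.
void LoadProperty(bool fast_path, const LazyString& lazy_name) {
  if (fast_path) {
    std::printf("fast path: name never materialized\n");
    return;
  }
  std::printf("slow path: loading '%s'\n", lazy_name().c_str());
}

int main() {
  LazyString name = [] {
    std::printf("(materializing name)\n");
    return std::string("foo");
  };
  LoadProperty(true, name);   // lambda never runs
  LoadProperty(false, name);  // lambda runs once, here
}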
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 19d17baa52..d581802340 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -12,9 +12,9 @@
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects/js-generator.h"
-#include "src/objects/module.h"
-#include "src/utils/allocation.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/source-text-module.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -324,7 +324,7 @@ Node* IntrinsicsGenerator::GetImportMetaObject(
Node* const module =
__ LoadContextElement(module_context, Context::EXTENSION_INDEX);
Node* const import_meta =
- __ LoadObjectField(module, Module::kImportMetaOffset);
+ __ LoadObjectField(module, SourceTextModule::kImportMetaOffset);
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 9e06d95fde..eb91ae06a4 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -112,7 +112,7 @@ void Interpreter::IterateDispatchTable(RootVisitor* v) {
CHECK(code_entry == kNullAddress ||
InstructionStream::PcIsOffHeap(isolate_, code_entry));
}
-#endif // ENABLE_SLOW_DCHECKS
+#endif // DEBUG
return;
}
@@ -230,12 +230,12 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
return SUCCEEDED;
}
-UnoptimizedCompilationJob* Interpreter::NewCompilationJob(
+std::unique_ptr<UnoptimizedCompilationJob> Interpreter::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals) {
- return new InterpreterCompilationJob(parse_info, literal, allocator,
- eager_inner_literals);
+ return base::make_unique<InterpreterCompilationJob>(
+ parse_info, literal, allocator, eager_inner_literals);
}
void Interpreter::ForEachBytecode(
@@ -290,14 +290,9 @@ bool Interpreter::IsDispatchTableInitialized() const {
}
const char* Interpreter::LookupNameOfBytecodeHandler(const Code code) {
-#ifdef ENABLE_DISASSEMBLER
-#define RETURN_NAME(Name, ...) \
- if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code.entry()) { \
- return #Name; \
+ if (code.kind() == Code::BYTECODE_HANDLER) {
+ return Builtins::name(code.builtin_index());
}
- BYTECODE_LIST(RETURN_NAME)
-#undef RETURN_NAME
-#endif // ENABLE_DISASSEMBLER
return nullptr;
}
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 8c6216b6a6..e8c494a6ce 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -43,7 +43,7 @@ class Interpreter {
// Creates a compilation job which will generate bytecode for |literal|.
// Additionally, if |eager_inner_literals| is not null, adds any eagerly
// compilable inner FunctionLiterals to this list.
- static UnoptimizedCompilationJob* NewCompilationJob(
+ static std::unique_ptr<UnoptimizedCompilationJob> NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals);
@@ -60,8 +60,8 @@ class Interpreter {
// GC support.
void IterateDispatchTable(RootVisitor* v);
- // Disassembler support (only useful with ENABLE_DISASSEMBLER defined).
- const char* LookupNameOfBytecodeHandler(const Code code);
+ // Disassembler support.
+ V8_EXPORT_PRIVATE const char* LookupNameOfBytecodeHandler(const Code code);
V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
diff --git a/deps/v8/src/json/OWNERS b/deps/v8/src/json/OWNERS
index 9a078e6d10..bc56882a9a 100644
--- a/deps/v8/src/json/OWNERS
+++ b/deps/v8/src/json/OWNERS
@@ -1,3 +1,6 @@
ishell@chromium.org
+jkummerow@chromium.org
verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index 83bacc81a6..fa2118af1e 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -4,8 +4,8 @@
#include "src/json/json-parser.h"
+#include "src/common/message-template.h"
#include "src/debug/debug.h"
-#include "src/execution/message-template.h"
#include "src/numbers/conversions.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/field-type.h"
@@ -499,7 +499,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Representation expected_representation = details.representation();
if (!value->FitsRepresentation(expected_representation)) {
- Representation representation = value->OptimalRepresentation();
+ Representation representation = value->OptimalRepresentation(isolate());
representation = representation.generalize(expected_representation);
if (!expected_representation.CanBeInPlaceChangedTo(representation)) {
map = ParentOfDescriptorOwner(isolate_, map, target, descriptor);
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index 2280292332..a021fbbc1b 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -4,7 +4,7 @@
#include "src/json/json-stringifier.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/numbers/conversions.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
@@ -75,7 +75,8 @@ class JsonStringifier {
return SerializeDouble(object->value());
}
- Result SerializeJSValue(Handle<JSValue> object, Handle<Object> key);
+ Result SerializeJSPrimitiveWrapper(Handle<JSPrimitiveWrapper> object,
+ Handle<Object> key);
V8_INLINE Result SerializeJSArray(Handle<JSArray> object, Handle<Object> key);
V8_INLINE Result SerializeJSObject(Handle<JSObject> object,
@@ -257,8 +258,9 @@ bool JsonStringifier::InitializeReplacer(Handle<Object> replacer) {
if (element->IsNumber() || element->IsString()) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, key, Object::ToString(isolate_, element), false);
- } else if (element->IsJSValue()) {
- Handle<Object> value(Handle<JSValue>::cast(element)->value(), isolate_);
+ } else if (element->IsJSPrimitiveWrapper()) {
+ Handle<Object> value(Handle<JSPrimitiveWrapper>::cast(element)->value(),
+ isolate_);
if (value->IsNumber() || value->IsString()) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, key, Object::ToString(isolate_, element), false);
@@ -281,8 +283,9 @@ bool JsonStringifier::InitializeReplacer(Handle<Object> replacer) {
bool JsonStringifier::InitializeGap(Handle<Object> gap) {
DCHECK_NULL(gap_);
HandleScope scope(isolate_);
- if (gap->IsJSValue()) {
- Handle<Object> value(Handle<JSValue>::cast(gap)->value(), isolate_);
+ if (gap->IsJSPrimitiveWrapper()) {
+ Handle<Object> value(Handle<JSPrimitiveWrapper>::cast(gap)->value(),
+ isolate_);
if (value->IsString()) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, gap,
Object::ToString(isolate_, gap), false);
@@ -558,9 +561,10 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
case JS_ARRAY_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
return SerializeJSArray(Handle<JSArray>::cast(object), key);
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSValue(Handle<JSValue>::cast(object), key);
+ return SerializeJSPrimitiveWrapper(
+ Handle<JSPrimitiveWrapper>::cast(object), key);
case SYMBOL_TYPE:
return UNCHANGED;
default:
@@ -583,8 +587,8 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
UNREACHABLE();
}
-JsonStringifier::Result JsonStringifier::SerializeJSValue(
- Handle<JSValue> object, Handle<Object> key) {
+JsonStringifier::Result JsonStringifier::SerializeJSPrimitiveWrapper(
+ Handle<JSPrimitiveWrapper> object, Handle<Object> key) {
Object raw = object->value();
if (raw.IsString()) {
Handle<Object> value;
diff --git a/deps/v8/src/libplatform/tracing/OWNERS b/deps/v8/src/libplatform/tracing/OWNERS
new file mode 100644
index 0000000000..507f904088
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/OWNERS
@@ -0,0 +1 @@
+petermarshall@chromium.org
diff --git a/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc b/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
index 99db86a7d1..94b74ef255 100644
--- a/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
+++ b/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
@@ -8,6 +8,8 @@
#include "base/trace_event/common/trace_event_common.h"
#include "perfetto/trace/chrome/chrome_trace_packet.pb.h"
+#include "perfetto/trace/trace.pb.h"
+#include "perfetto/tracing.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -111,7 +113,7 @@ void JSONTraceEventListener::AppendArgValue(
}
void JSONTraceEventListener::ProcessPacket(
- const ::perfetto::protos::ChromeTracePacket& packet) {
+ const ::perfetto::protos::TracePacket& packet) {
for (const ::perfetto::protos::ChromeTraceEvent& event :
packet.chrome_events().trace_events()) {
if (append_comma_) *stream_ << ",";
diff --git a/deps/v8/src/libplatform/tracing/json-trace-event-listener.h b/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
index fc4979f14c..d13332871f 100644
--- a/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
+++ b/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
@@ -26,10 +26,9 @@ class JSONTraceEventListener final : public TraceEventListener {
explicit JSONTraceEventListener(std::ostream* stream);
~JSONTraceEventListener() override;
- private:
- void ProcessPacket(
- const ::perfetto::protos::ChromeTracePacket& packet) override;
+ void ProcessPacket(const ::perfetto::protos::TracePacket& packet) override;
+ private:
// Internal implementation
void AppendJSONString(const char* str);
void AppendArgValue(const ::perfetto::protos::ChromeTraceEvent_Arg& arg);
diff --git a/deps/v8/src/libplatform/tracing/perfetto-consumer.cc b/deps/v8/src/libplatform/tracing/perfetto-consumer.cc
deleted file mode 100644
index 8071fe52d5..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-consumer.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/perfetto-consumer.h"
-
-#include "perfetto/trace/chrome/chrome_trace_packet.pb.h"
-#include "perfetto/tracing/core/trace_packet.h"
-#include "src/base/macros.h"
-#include "src/base/platform/semaphore.h"
-#include "src/libplatform/tracing/trace-event-listener.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-PerfettoConsumer::PerfettoConsumer(base::Semaphore* finished)
- : finished_semaphore_(finished) {}
-
-void PerfettoConsumer::OnTraceData(std::vector<::perfetto::TracePacket> packets,
- bool has_more) {
- for (const ::perfetto::TracePacket& packet : packets) {
- perfetto::protos::ChromeTracePacket proto_packet;
- bool success = packet.Decode(&proto_packet);
- USE(success);
- DCHECK(success);
-
- for (TraceEventListener* listener : listeners_) {
- listener->ProcessPacket(proto_packet);
- }
- }
- // PerfettoTracingController::StopTracing() waits on this sempahore. This is
- // so that we can ensure that this consumer has finished consuming all of the
- // trace events from the buffer before the buffer is destroyed.
- if (!has_more) finished_semaphore_->Signal();
-}
-
-void PerfettoConsumer::AddTraceEventListener(TraceEventListener* listener) {
- listeners_.push_back(listener);
-}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-consumer.h b/deps/v8/src/libplatform/tracing/perfetto-consumer.h
deleted file mode 100644
index 83d0c48c1b..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-consumer.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_CONSUMER_H_
-#define V8_LIBPLATFORM_TRACING_PERFETTO_CONSUMER_H_
-
-#include <memory>
-
-#include "perfetto/tracing/core/consumer.h"
-#include "perfetto/tracing/core/tracing_service.h"
-#include "src/base/logging.h"
-
-namespace perfetto {
-namespace protos {
-class ChromeTracePacket;
-} // namespace protos
-} // namespace perfetto
-
-namespace v8 {
-
-namespace base {
-class Semaphore;
-}
-
-namespace platform {
-namespace tracing {
-
-class TraceEventListener;
-
-// A Perfetto Consumer gets streamed trace events from the Service via
-// OnTraceData(). A Consumer can be configured (via
-// service_endpoint()->EnableTracing()) to listen to various different types of
-// trace events. The Consumer is responsible for producing whatever tracing
-// output the system should have.
-
-// Implements the V8-specific logic for interacting with the tracing controller
-// and directs trace events to the added TraceEventListeners.
-class PerfettoConsumer final : public ::perfetto::Consumer {
- public:
- explicit PerfettoConsumer(base::Semaphore* finished);
-
- using ServiceEndpoint = ::perfetto::TracingService::ConsumerEndpoint;
-
- // Register a trace event listener that will receive trace events from this
- // consumer. This can be called multiple times to register multiple listeners,
- // but must be called before starting tracing.
- void AddTraceEventListener(TraceEventListener* listener);
-
- ServiceEndpoint* service_endpoint() const { return service_endpoint_.get(); }
- void set_service_endpoint(std::unique_ptr<ServiceEndpoint> endpoint) {
- service_endpoint_ = std::move(endpoint);
- }
-
- private:
- // ::perfetto::Consumer implementation
- void OnConnect() override {}
- void OnDisconnect() override {}
- void OnTracingDisabled() override {}
- void OnTraceData(std::vector<::perfetto::TracePacket> packets,
- bool has_more) override;
- void OnDetach(bool success) override {}
- void OnAttach(bool success, const ::perfetto::TraceConfig&) override {}
- void OnTraceStats(bool success, const ::perfetto::TraceStats&) override {
- UNREACHABLE();
- }
- void OnObservableEvents(const ::perfetto::ObservableEvents&) override {
- UNREACHABLE();
- }
-
- std::unique_ptr<ServiceEndpoint> service_endpoint_;
- base::Semaphore* finished_semaphore_;
- std::vector<TraceEventListener*> listeners_;
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_PERFETTO_CONSUMER_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-producer.cc b/deps/v8/src/libplatform/tracing/perfetto-producer.cc
deleted file mode 100644
index 814dca6b59..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-producer.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/perfetto-producer.h"
-
-#include "perfetto/tracing/core/data_source_config.h"
-#include "perfetto/tracing/core/data_source_descriptor.h"
-#include "perfetto/tracing/core/trace_writer.h"
-#include "src/libplatform/tracing/perfetto-tasks.h"
-#include "src/libplatform/tracing/perfetto-tracing-controller.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-void PerfettoProducer::OnConnect() {
- ::perfetto::DataSourceDescriptor ds_desc;
- ds_desc.set_name("v8.trace_events");
- service_endpoint_->RegisterDataSource(ds_desc);
-}
-
-void PerfettoProducer::StartDataSource(
- ::perfetto::DataSourceInstanceID, const ::perfetto::DataSourceConfig& cfg) {
- target_buffer_ = cfg.target_buffer();
- tracing_controller_->OnProducerReady();
-}
-
-void PerfettoProducer::StopDataSource(::perfetto::DataSourceInstanceID) {
- target_buffer_ = 0;
-}
-
-std::unique_ptr<::perfetto::TraceWriter> PerfettoProducer::CreateTraceWriter()
- const {
- CHECK_NE(0, target_buffer_);
- return service_endpoint_->CreateTraceWriter(target_buffer_);
-}
-
-PerfettoProducer::PerfettoProducer(
- PerfettoTracingController* tracing_controller)
- : tracing_controller_(tracing_controller) {}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-producer.h b/deps/v8/src/libplatform/tracing/perfetto-producer.h
deleted file mode 100644
index 2a363e8bf8..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-producer.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_PRODUCER_H_
-#define V8_LIBPLATFORM_TRACING_PERFETTO_PRODUCER_H_
-
-#include <atomic>
-#include <memory>
-
-#include "perfetto/tracing/core/producer.h"
-#include "perfetto/tracing/core/tracing_service.h"
-#include "src/base/logging.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-class PerfettoTracingController;
-
-class PerfettoProducer final : public ::perfetto::Producer {
- public:
- using ServiceEndpoint = ::perfetto::TracingService::ProducerEndpoint;
-
- explicit PerfettoProducer(PerfettoTracingController* tracing_controller);
-
- ServiceEndpoint* service_endpoint() const { return service_endpoint_.get(); }
- void set_service_endpoint(std::unique_ptr<ServiceEndpoint> endpoint) {
- service_endpoint_ = std::move(endpoint);
- }
-
- // Create a TraceWriter for the calling thread. The TraceWriter is a
- // thread-local object that writes data into a buffer which is shared between
- // all TraceWriters for a given PerfettoProducer instance. Can only be called
- // after the StartDataSource() callback has been received from the service, as
- // this provides the buffer.
- std::unique_ptr<::perfetto::TraceWriter> CreateTraceWriter() const;
-
- private:
- // ::perfetto::Producer implementation
- void OnConnect() override;
- void OnDisconnect() override {}
- void OnTracingSetup() override {}
- void SetupDataSource(::perfetto::DataSourceInstanceID,
- const ::perfetto::DataSourceConfig&) override {}
- void StartDataSource(::perfetto::DataSourceInstanceID,
- const ::perfetto::DataSourceConfig& cfg) override;
- void StopDataSource(::perfetto::DataSourceInstanceID) override;
- // TODO(petermarshall): Implement Flush(). A final flush happens when the
- // TraceWriter object for each thread is destroyed, but this will be more
- // efficient.
- void Flush(::perfetto::FlushRequestID,
- const ::perfetto::DataSourceInstanceID*, size_t) override {}
-
- void ClearIncrementalState(
- const ::perfetto::DataSourceInstanceID* data_source_ids,
- size_t num_data_sources) override {
- UNREACHABLE();
- }
-
- std::unique_ptr<ServiceEndpoint> service_endpoint_;
- uint32_t target_buffer_ = 0;
- PerfettoTracingController* tracing_controller_;
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_PERFETTO_PRODUCER_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc b/deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc
deleted file mode 100644
index 6c31c05070..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/perfetto-shared-memory.h"
-
-#include "src/base/platform/platform.h"
-#include "src/base/template-utils.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-PerfettoSharedMemory::PerfettoSharedMemory(size_t size)
- : size_(size),
- paged_memory_(::perfetto::base::PagedMemory::Allocate(size)) {
- // TODO(956543): Find a cross-platform solution.
- // TODO(petermarshall): Don't assume that size is page-aligned.
-}
-
-std::unique_ptr<::perfetto::SharedMemory>
-PerfettoSharedMemoryFactory::CreateSharedMemory(size_t size) {
- return base::make_unique<PerfettoSharedMemory>(size);
-}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-shared-memory.h b/deps/v8/src/libplatform/tracing/perfetto-shared-memory.h
deleted file mode 100644
index 7a987cc7f0..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-shared-memory.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_SHARED_MEMORY_H_
-#define V8_LIBPLATFORM_TRACING_PERFETTO_SHARED_MEMORY_H_
-
-#include "perfetto/tracing/core/shared_memory.h"
-
-#include "third_party/perfetto/include/perfetto/base/paged_memory.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-// Perfetto requires a shared memory implementation for multi-process embedders
-// but V8 is single process. We implement it here using PagedMemory from
-// perfetto.
-class PerfettoSharedMemory : public ::perfetto::SharedMemory {
- public:
- explicit PerfettoSharedMemory(size_t size);
-
- // The PagedMemory destructor will free the underlying memory when this object
- // is destroyed.
-
- void* start() const override { return paged_memory_.Get(); }
- size_t size() const override { return size_; }
-
- private:
- size_t size_;
- ::perfetto::base::PagedMemory paged_memory_;
-};
-
-class PerfettoSharedMemoryFactory : public ::perfetto::SharedMemory::Factory {
- public:
- ~PerfettoSharedMemoryFactory() override = default;
- std::unique_ptr<::perfetto::SharedMemory> CreateSharedMemory(
- size_t size) override;
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_PERFETTO_SHARED_MEMORY_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tasks.cc b/deps/v8/src/libplatform/tracing/perfetto-tasks.cc
deleted file mode 100644
index 70d00ed626..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-tasks.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/perfetto-tasks.h"
-
-#include "src/base/platform/semaphore.h"
-#include "src/base/platform/time.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-PerfettoTaskRunner::PerfettoTaskRunner() : runner_(1, DefaultTimeFunction) {}
-
-PerfettoTaskRunner::~PerfettoTaskRunner() { runner_.Terminate(); }
-
-// static
-double PerfettoTaskRunner::DefaultTimeFunction() {
- return (base::TimeTicks::HighResolutionNow() - base::TimeTicks())
- .InSecondsF();
-}
-
-void PerfettoTaskRunner::PostTask(std::function<void()> f) {
- runner_.PostTask(base::make_unique<TracingTask>(std::move(f)));
-}
-
-void PerfettoTaskRunner::PostDelayedTask(std::function<void()> f,
- uint32_t delay_ms) {
- double delay_in_seconds =
- delay_ms / static_cast<double>(base::Time::kMillisecondsPerSecond);
- runner_.PostDelayedTask(base::make_unique<TracingTask>(std::move(f)),
- delay_in_seconds);
-}
-
-bool PerfettoTaskRunner::RunsTasksOnCurrentThread() const {
- return runner_.RunsTasksOnCurrentThread();
-}
-
-void PerfettoTaskRunner::FinishImmediateTasks() {
- DCHECK(!RunsTasksOnCurrentThread());
- base::Semaphore semaphore(0);
- // PostTask has guaranteed ordering so this will be the last task executed.
- runner_.PostTask(
- base::make_unique<TracingTask>([&semaphore] { semaphore.Signal(); }));
-
- semaphore.Wait();
-}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tasks.h b/deps/v8/src/libplatform/tracing/perfetto-tasks.h
deleted file mode 100644
index 054a9e157a..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-tasks.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_TASKS_H_
-#define V8_LIBPLATFORM_TRACING_PERFETTO_TASKS_H_
-
-#include <functional>
-
-#include "include/v8-platform.h"
-#include "perfetto/base/task_runner.h"
-#include "src/libplatform/default-worker-threads-task-runner.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-class TracingTask : public Task {
- public:
- explicit TracingTask(std::function<void()> f) : f_(std::move(f)) {}
-
- void Run() override { f_(); }
-
- private:
- std::function<void()> f_;
-};
-
-class PerfettoTaskRunner : public ::perfetto::base::TaskRunner {
- public:
- PerfettoTaskRunner();
- ~PerfettoTaskRunner() override;
-
- // ::perfetto::base::TaskRunner implementation
- void PostTask(std::function<void()> f) override;
- void PostDelayedTask(std::function<void()> f, uint32_t delay_ms) override;
- void AddFileDescriptorWatch(int fd, std::function<void()>) override {
- UNREACHABLE();
- }
- void RemoveFileDescriptorWatch(int fd) override { UNREACHABLE(); }
- bool RunsTasksOnCurrentThread() const override;
-
- // PerfettoTaskRunner implementation
- void FinishImmediateTasks();
-
- private:
- static double DefaultTimeFunction();
-
- DefaultWorkerThreadsTaskRunner runner_;
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_PERFETTO_TASKS_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc b/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc
deleted file mode 100644
index 9b62c2ae78..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/perfetto-tracing-controller.h"
-
-#include "perfetto/tracing/core/trace_config.h"
-#include "perfetto/tracing/core/trace_writer.h"
-#include "perfetto/tracing/core/tracing_service.h"
-#include "src/libplatform/tracing/perfetto-consumer.h"
-#include "src/libplatform/tracing/perfetto-producer.h"
-#include "src/libplatform/tracing/perfetto-shared-memory.h"
-#include "src/libplatform/tracing/perfetto-tasks.h"
-#include "src/libplatform/tracing/trace-event-listener.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-PerfettoTracingController::PerfettoTracingController()
- : writer_key_(base::Thread::CreateThreadLocalKey()),
- producer_ready_semaphore_(0),
- consumer_finished_semaphore_(0) {}
-
-void PerfettoTracingController::StartTracing(
- const ::perfetto::TraceConfig& trace_config) {
- DCHECK(!task_runner_);
- task_runner_ = base::make_unique<PerfettoTaskRunner>();
- // The Perfetto service expects calls on the task runner thread which is why
- // the setup below occurs in posted tasks.
- task_runner_->PostTask([&trace_config, this] {
- std::unique_ptr<::perfetto::SharedMemory::Factory> shmem_factory =
- base::make_unique<PerfettoSharedMemoryFactory>();
-
- service_ = ::perfetto::TracingService::CreateInstance(
- std::move(shmem_factory), task_runner_.get());
- // This allows Perfetto to recover trace events that were written by
- // TraceWriters which have not yet been deleted. This allows us to keep
- // TraceWriters alive past the end of tracing, rather than having to delete
- // them all when tracing stops which would require synchronization on every
- // trace event. Eventually we will delete TraceWriters when threads die, but
- // for now we just leak all TraceWriters.
- service_->SetSMBScrapingEnabled(true);
- producer_ = base::make_unique<PerfettoProducer>(this);
- consumer_ =
- base::make_unique<PerfettoConsumer>(&consumer_finished_semaphore_);
-
- for (TraceEventListener* listener : listeners_) {
- consumer_->AddTraceEventListener(listener);
- }
-
- producer_->set_service_endpoint(service_->ConnectProducer(
- producer_.get(), 0, "v8.perfetto-producer", 0, true));
-
- consumer_->set_service_endpoint(
- service_->ConnectConsumer(consumer_.get(), 0));
-
- // We need to wait for the OnConnected() callbacks of the producer and
- // consumer to be called.
- consumer_->service_endpoint()->EnableTracing(trace_config);
- });
-
- producer_ready_semaphore_.Wait();
-}
-
-void PerfettoTracingController::StopTracing() {
- // Finish all of the tasks such as existing AddTraceEvent calls. These
- // require the data structures below to work properly, so keep them alive
- // until the tasks are done.
- task_runner_->FinishImmediateTasks();
-
- task_runner_->PostTask([this] {
- // Trigger shared memory buffer scraping which will get all pending trace
- // events that have been written by still-living TraceWriters.
- consumer_->service_endpoint()->DisableTracing();
- // Trigger the consumer to finish. This can trigger multiple calls to
- // PerfettoConsumer::OnTraceData(), with the final call passing has_more
- // as false.
- consumer_->service_endpoint()->ReadBuffers();
- });
-
- // Wait until the final OnTraceData() call with has_more=false has completed.
- consumer_finished_semaphore_.Wait();
-
- task_runner_->PostTask([this] {
- consumer_.reset();
- producer_.reset();
- service_.reset();
- });
-
- // Finish the above task, and any callbacks that were triggered.
- task_runner_->FinishImmediateTasks();
- task_runner_.reset();
-}
-
-void PerfettoTracingController::AddTraceEventListener(
- TraceEventListener* listener) {
- listeners_.push_back(listener);
-}
-
-PerfettoTracingController::~PerfettoTracingController() {
- base::Thread::DeleteThreadLocalKey(writer_key_);
-}
-
-::perfetto::TraceWriter*
-PerfettoTracingController::GetOrCreateThreadLocalWriter() {
- // TODO(petermarshall): Use some form of thread-local destructor so that
- // repeatedly created threads don't cause excessive leaking of TraceWriters.
- if (base::Thread::HasThreadLocal(writer_key_)) {
- return static_cast<::perfetto::TraceWriter*>(
- base::Thread::GetExistingThreadLocal(writer_key_));
- }
-
- // We leak the TraceWriter objects created for each thread. Perfetto has a
- // way of getting events from leaked TraceWriters and we can avoid needing a
- // lock on every trace event this way.
- std::unique_ptr<::perfetto::TraceWriter> tw = producer_->CreateTraceWriter();
- ::perfetto::TraceWriter* writer = tw.release();
-
- base::Thread::SetThreadLocal(writer_key_, writer);
- return writer;
-}
-
-void PerfettoTracingController::OnProducerReady() {
- producer_ready_semaphore_.Signal();
-}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h b/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h
deleted file mode 100644
index 67a3c26cef..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_TRACING_CONTROLLER_H_
-#define V8_LIBPLATFORM_TRACING_PERFETTO_TRACING_CONTROLLER_H_
-
-#include <atomic>
-#include <fstream>
-#include <memory>
-#include <vector>
-
-#include "src/base/platform/platform.h"
-#include "src/base/platform/semaphore.h"
-
-namespace perfetto {
-class TraceConfig;
-class TraceWriter;
-class TracingService;
-} // namespace perfetto
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-class PerfettoConsumer;
-class PerfettoProducer;
-class PerfettoTaskRunner;
-class TraceEventListener;
-
-// This is the top-level interface for performing tracing with perfetto. The
-// user of this class should call StartTracing() to start tracing, and
-// StopTracing() to stop it. To write trace events, the user can obtain a
-// thread-local TraceWriter object using GetOrCreateThreadLocalWriter().
-class PerfettoTracingController {
- public:
- PerfettoTracingController();
-
- // Blocks and sets up all required data structures for tracing. It is safe to
- // call GetOrCreateThreadLocalWriter() to obtain thread-local TraceWriters for
- // writing trace events once this call returns. Tracing output will be sent to
- // the TraceEventListeners registered via AddTraceEventListener().
- void StartTracing(const ::perfetto::TraceConfig& trace_config);
-
- // Blocks and finishes all existing AddTraceEvent tasks. Stops the tracing
- // thread.
- void StopTracing();
-
- // Register a trace event listener that will receive trace events. This can be
- // called multiple times to register multiple listeners, but must be called
- // before starting tracing.
- void AddTraceEventListener(TraceEventListener* listener);
-
- ~PerfettoTracingController();
-
- // Each thread that wants to trace should call this to get their TraceWriter.
- // PerfettoTracingController creates and owns the TraceWriter.
- ::perfetto::TraceWriter* GetOrCreateThreadLocalWriter();
-
- private:
- // Signals the producer_ready_semaphore_.
- void OnProducerReady();
-
- // PerfettoProducer is the only class allowed to call OnProducerReady().
- friend class PerfettoProducer;
-
- std::unique_ptr<::perfetto::TracingService> service_;
- std::unique_ptr<PerfettoProducer> producer_;
- std::unique_ptr<PerfettoConsumer> consumer_;
- std::unique_ptr<PerfettoTaskRunner> task_runner_;
- std::vector<TraceEventListener*> listeners_;
- base::Thread::LocalStorageKey writer_key_;
- // A semaphore that is signalled when StartRecording is called. StartTracing
- // waits on this semaphore to be notified when the tracing service is ready to
- // receive trace events.
- base::Semaphore producer_ready_semaphore_;
- base::Semaphore consumer_finished_semaphore_;
-
- DISALLOW_COPY_AND_ASSIGN(PerfettoTracingController);
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_PERFETTO_TRACING_CONTROLLER_H_
diff --git a/deps/v8/src/libplatform/tracing/trace-event-listener.cc b/deps/v8/src/libplatform/tracing/trace-event-listener.cc
new file mode 100644
index 0000000000..8224221228
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/trace-event-listener.cc
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/tracing/trace-event-listener.h"
+
+#include "perfetto/trace/trace.pb.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+void TraceEventListener::ParseFromArray(const std::vector<char>& array) {
+ perfetto::protos::Trace trace;
+ CHECK(trace.ParseFromArray(array.data(), static_cast<int>(array.size())));
+
+ for (int i = 0; i < trace.packet_size(); i++) {
+ // TODO(petermarshall): ChromeTracePacket instead.
+ const perfetto::protos::TracePacket& packet = trace.packet(i);
+ ProcessPacket(packet);
+ }
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/trace-event-listener.h b/deps/v8/src/libplatform/tracing/trace-event-listener.h
index 4acdb2935b..6a535c79c5 100644
--- a/deps/v8/src/libplatform/tracing/trace-event-listener.h
+++ b/deps/v8/src/libplatform/tracing/trace-event-listener.h
@@ -5,9 +5,11 @@
#ifndef V8_LIBPLATFORM_TRACING_TRACE_EVENT_LISTENER_H_
#define V8_LIBPLATFORM_TRACING_TRACE_EVENT_LISTENER_H_
+#include <vector>
+
namespace perfetto {
namespace protos {
-class ChromeTracePacket;
+class TracePacket;
} // namespace protos
} // namespace perfetto
@@ -23,8 +25,9 @@ namespace tracing {
class TraceEventListener {
public:
virtual ~TraceEventListener() = default;
- virtual void ProcessPacket(
- const ::perfetto::protos::ChromeTracePacket& packet) = 0;
+ virtual void ProcessPacket(const ::perfetto::protos::TracePacket& packet) = 0;
+
+ void ParseFromArray(const std::vector<char>& array);
};
} // namespace tracing
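Note on the updated listener interface: ProcessPacket() now receives full perfetto::protos::TracePacket messages, and the new ParseFromArray() helper (added in trace-event-listener.cc above) decodes a serialized trace and dispatches each packet to ProcessPacket(). The following is an illustrative sketch only, not part of the diff; the CountingListener name and the chrome_events protobuf accessors are assumptions about the generated proto API.

// Illustrative sketch only (not part of the diff).
#include <vector>

#include "perfetto/trace/trace.pb.h"
#include "src/libplatform/tracing/trace-event-listener.h"

namespace {

class CountingListener : public v8::platform::tracing::TraceEventListener {
 public:
  // Called once per packet by ParseFromArray() after it decodes the
  // serialized perfetto::protos::Trace.
  void ProcessPacket(const ::perfetto::protos::TracePacket& packet) override {
    // Assumed generated accessors for the chrome_events bundle field.
    if (packet.has_chrome_events()) {
      event_count_ += packet.chrome_events().trace_events_size();
    }
  }

  int event_count() const { return event_count_; }

 private:
  int event_count_ = 0;
};

}  // namespace

// Usage (assuming |trace| holds the bytes returned by ReadTraceBlocking()):
//   CountingListener listener;
//   listener.ParseFromArray(trace);
//   // listener.event_count() now reflects the decoded chrome trace events.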
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index 91d042ba1e..0700e34825 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -16,12 +16,25 @@
#include "base/trace_event/common/trace_event_common.h"
#include "perfetto/trace/chrome/chrome_trace_event.pbzero.h"
#include "perfetto/trace/trace_packet.pbzero.h"
-#include "perfetto/tracing/core/data_source_config.h"
-#include "perfetto/tracing/core/trace_config.h"
-#include "perfetto/tracing/core/trace_packet.h"
-#include "perfetto/tracing/core/trace_writer.h"
+#include "perfetto/tracing.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
#include "src/libplatform/tracing/json-trace-event-listener.h"
-#include "src/libplatform/tracing/perfetto-tracing-controller.h"
+#endif // V8_USE_PERFETTO
+
+#ifdef V8_USE_PERFETTO
+class V8DataSource : public perfetto::DataSource<V8DataSource> {
+ public:
+ void OnSetup(const SetupArgs&) override {}
+ void OnStart(const StartArgs&) override { started_.Signal(); }
+ void OnStop(const StopArgs&) override {}
+
+ static v8::base::Semaphore started_;
+};
+
+v8::base::Semaphore V8DataSource::started_{0};
+
+PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(V8DataSource);
#endif // V8_USE_PERFETTO
namespace v8 {
@@ -79,6 +92,7 @@ void TracingController::InitializeForPerfetto(std::ostream* output_stream) {
output_stream_ = output_stream;
DCHECK_NOT_NULL(output_stream);
DCHECK(output_stream->good());
+ mutex_.reset(new base::Mutex());
}
void TracingController::SetTraceEventListenerForTesting(
@@ -133,7 +147,9 @@ void AddArgsToTraceProto(
case TRACE_VALUE_TYPE_POINTER:
arg->set_pointer_value(arg_value.as_uint);
break;
- // TODO(petermarshall): Treat copy strings specially.
+ // There is no difference between copy strings and regular strings for
+ // Perfetto; the set_string_value(const char*) API will copy the string
+ // into the protobuf by default.
case TRACE_VALUE_TYPE_COPY_STRING:
case TRACE_VALUE_TYPE_STRING:
arg->set_string_value(arg_value.as_string);
@@ -171,44 +187,40 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
int64_t cpu_now_us = CurrentCpuTimestampMicroseconds();
#ifdef V8_USE_PERFETTO
- if (perfetto_recording_.load()) {
// Don't use COMPLETE events with perfetto - instead transform them into
// BEGIN/END pairs. This avoids the need for a thread-local stack of pending
// trace events as perfetto does not support handles into the trace buffer.
if (phase == TRACE_EVENT_PHASE_COMPLETE) phase = TRACE_EVENT_PHASE_BEGIN;
- ::perfetto::TraceWriter* writer =
- perfetto_tracing_controller_->GetOrCreateThreadLocalWriter();
- // TODO(petermarshall): We shouldn't start one packet for each event.
- // We should try to bundle them together in one bundle.
- auto packet = writer->NewTracePacket();
- auto* trace_event_bundle = packet->set_chrome_events();
- auto* trace_event = trace_event_bundle->add_trace_events();
-
- trace_event->set_name(name);
- trace_event->set_timestamp(timestamp);
- trace_event->set_phase(phase);
- trace_event->set_thread_id(base::OS::GetCurrentThreadId());
- trace_event->set_duration(0);
- trace_event->set_thread_duration(0);
- if (scope) trace_event->set_scope(scope);
- trace_event->set_id(id);
- trace_event->set_flags(flags);
- if (category_enabled_flag) {
- const char* category_group_name =
- GetCategoryGroupName(category_enabled_flag);
- DCHECK_NOT_NULL(category_group_name);
- trace_event->set_category_group_name(category_group_name);
- }
- trace_event->set_process_id(base::OS::GetCurrentProcessId());
- trace_event->set_thread_timestamp(cpu_now_us);
- trace_event->set_bind_id(bind_id);
- AddArgsToTraceProto(trace_event, num_args, arg_names, arg_types, arg_values,
- arg_convertables);
+ V8DataSource::Trace([&](V8DataSource::TraceContext ctx) {
+ auto packet = ctx.NewTracePacket();
+ auto* trace_event_bundle = packet->set_chrome_events();
+ auto* trace_event = trace_event_bundle->add_trace_events();
+
+ trace_event->set_name(name);
+ trace_event->set_timestamp(timestamp);
+ trace_event->set_phase(phase);
+ trace_event->set_thread_id(base::OS::GetCurrentThreadId());
+ trace_event->set_duration(0);
+ trace_event->set_thread_duration(0);
+ if (scope) trace_event->set_scope(scope);
+ trace_event->set_id(id);
+ trace_event->set_flags(flags);
+ if (category_enabled_flag) {
+ const char* category_group_name =
+ GetCategoryGroupName(category_enabled_flag);
+ DCHECK_NOT_NULL(category_group_name);
+ trace_event->set_category_group_name(category_group_name);
+ }
+ trace_event->set_process_id(base::OS::GetCurrentProcessId());
+ trace_event->set_thread_timestamp(cpu_now_us);
+ trace_event->set_bind_id(bind_id);
- packet->Finalize();
- }
-#endif // V8_USE_PERFETTO
+ AddArgsToTraceProto(trace_event, num_args, arg_names, arg_types,
+ arg_values, arg_convertables);
+ });
+ return 0;
+#else
uint64_t handle = 0;
if (recording_.load(std::memory_order_acquire)) {
@@ -224,6 +236,7 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
}
}
return handle;
+#endif // V8_USE_PERFETTO
}
void TracingController::UpdateTraceEventDuration(
@@ -232,15 +245,8 @@ void TracingController::UpdateTraceEventDuration(
int64_t cpu_now_us = CurrentCpuTimestampMicroseconds();
#ifdef V8_USE_PERFETTO
- // TODO(petermarshall): Should we still record the end of unfinished events
- // when tracing has stopped?
- if (perfetto_recording_.load()) {
- // TODO(petermarshall): We shouldn't start one packet for each event. We
- // should try to bundle them together in one bundle.
- ::perfetto::TraceWriter* writer =
- perfetto_tracing_controller_->GetOrCreateThreadLocalWriter();
-
- auto packet = writer->NewTracePacket();
+ V8DataSource::Trace([&](V8DataSource::TraceContext ctx) {
+ auto packet = ctx.NewTracePacket();
auto* trace_event_bundle = packet->set_chrome_events();
auto* trace_event = trace_event_bundle->add_trace_events();
@@ -249,14 +255,13 @@ void TracingController::UpdateTraceEventDuration(
trace_event->set_timestamp(now_us);
trace_event->set_process_id(base::OS::GetCurrentProcessId());
trace_event->set_thread_timestamp(cpu_now_us);
-
- packet->Finalize();
- }
-#endif // V8_USE_PERFETTO
+ });
+#else
TraceObject* trace_object = trace_buffer_->GetEventByHandle(handle);
if (!trace_object) return;
trace_object->UpdateDuration(now_us, cpu_now_us);
+#endif // V8_USE_PERFETTO
}
const char* TracingController::GetCategoryGroupName(
@@ -277,24 +282,27 @@ const char* TracingController::GetCategoryGroupName(
void TracingController::StartTracing(TraceConfig* trace_config) {
#ifdef V8_USE_PERFETTO
- perfetto_tracing_controller_ = base::make_unique<PerfettoTracingController>();
-
- if (listener_for_testing_) {
- perfetto_tracing_controller_->AddTraceEventListener(listener_for_testing_);
- }
DCHECK_NOT_NULL(output_stream_);
DCHECK(output_stream_->good());
json_listener_ = base::make_unique<JSONTraceEventListener>(output_stream_);
- perfetto_tracing_controller_->AddTraceEventListener(json_listener_.get());
- ::perfetto::TraceConfig perfetto_trace_config;
+  // TODO(petermarshall): Set the other params for the config.
+ ::perfetto::TraceConfig perfetto_trace_config;
perfetto_trace_config.add_buffers()->set_size_kb(4096);
auto* ds_config = perfetto_trace_config.add_data_sources()->mutable_config();
ds_config->set_name("v8.trace_events");
- // TODO(petermarshall): Set all the params from |perfetto_trace_config|.
- perfetto_tracing_controller_->StartTracing(perfetto_trace_config);
- perfetto_recording_.store(true);
+ perfetto::DataSourceDescriptor dsd;
+ dsd.set_name("v8.trace_events");
+ V8DataSource::Register(dsd);
+
+ tracing_session_ =
+ perfetto::Tracing::NewTrace(perfetto::BackendType::kUnspecifiedBackend);
+ tracing_session_->Setup(perfetto_trace_config);
+ // TODO(petermarshall): Switch to StartBlocking when available.
+ tracing_session_->Start();
+ V8DataSource::started_.Wait();
+
#endif // V8_USE_PERFETTO
trace_config_.reset(trace_config);
@@ -315,7 +323,6 @@ void TracingController::StopTracing() {
if (!recording_.compare_exchange_strong(expected, false)) {
return;
}
- DCHECK(trace_buffer_);
UpdateCategoryGroupEnabledFlags();
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
@@ -327,16 +334,24 @@ void TracingController::StopTracing() {
}
#ifdef V8_USE_PERFETTO
- perfetto_recording_.store(false);
- perfetto_tracing_controller_->StopTracing();
- perfetto_tracing_controller_.reset();
+ base::Semaphore stopped_{0};
+ tracing_session_->SetOnStopCallback([&stopped_]() { stopped_.Signal(); });
+ tracing_session_->Stop();
+ stopped_.Wait();
+
+ std::vector<char> trace = tracing_session_->ReadTraceBlocking();
+ json_listener_->ParseFromArray(trace);
+ if (listener_for_testing_) listener_for_testing_->ParseFromArray(trace);
+
json_listener_.reset();
-#endif // V8_USE_PERFETTO
+#else
{
base::MutexGuard lock(mutex_.get());
+ DCHECK(trace_buffer_);
trace_buffer_->Flush();
}
+#endif // V8_USE_PERFETTO
}
void TracingController::UpdateCategoryGroupEnabledFlag(size_t category_index) {
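For context on the new code path above: under V8_USE_PERFETTO the controller now registers a V8DataSource with the in-process Perfetto client library, starts a tracing session in StartTracing(), and in StopTracing() reads the serialized trace back and forwards it to the JSON listener (and the test listener, if set). The sketch below is illustrative only and not part of the diff; it assumes a V8_USE_PERFETTO build and elides any perfetto::Tracing::Initialize() setup the embedder may need.

// Illustrative sketch only (not part of the diff).
#include <fstream>

#include "libplatform/v8-tracing.h"

void TraceSomething() {
  std::ofstream json_out("trace.json");

  v8::platform::tracing::TracingController controller;
  controller.InitializeForPerfetto(&json_out);

  // StartTracing() takes ownership of the config.
  auto* config =
      v8::platform::tracing::TraceConfig::CreateDefaultTraceConfig();
  controller.StartTracing(config);  // Registers v8.trace_events and blocks
                                    // until the data source has started.

  // ... run the workload that emits trace events ...

  // Stops the session, reads the trace back, and writes JSON to |json_out|.
  controller.StopTracing();
}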
diff --git a/deps/v8/src/libsampler/OWNERS b/deps/v8/src/libsampler/OWNERS
index 87c96616bc..7ab7c063da 100644
--- a/deps/v8/src/libsampler/OWNERS
+++ b/deps/v8/src/libsampler/OWNERS
@@ -1 +1,4 @@
alph@chromium.org
+petermarshall@chromium.org
+
+# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 298d8d4446..3d517e29fc 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -13,7 +13,6 @@ namespace internal {
HR(background_marking, V8.GCBackgroundMarking, 0, 10000, 101) \
HR(background_scavenger, V8.GCBackgroundScavenger, 0, 10000, 101) \
HR(background_sweeping, V8.GCBackgroundSweeping, 0, 10000, 101) \
- HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21) \
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
@@ -89,7 +88,13 @@ namespace internal {
0, 100, 32) \
/* number of code GCs triggered per native module, collected on code GC */ \
HR(wasm_module_num_triggered_code_gcs, \
- V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20)
+ V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20) \
+ /* number of code spaces reserved per wasm module */ \
+ HR(wasm_module_num_code_spaces, V8.WasmModuleNumberOfCodeSpaces, 1, 128, 20) \
+ /* bailout reason if Liftoff failed, or {kSuccess} (per function) */ \
+ HR(liftoff_bailout_reasons, V8.LiftoffBailoutReasons, 0, 20, 21) \
+ /* Ticks observed in a single Turbofan compilation, in 1K */ \
+ HR(turbofan_ticks, V8.TurboFan1KTicks, 0, 100000, 200)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index bfe52f45ac..1efa7105cd 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -893,6 +893,7 @@ class RuntimeCallTimer final {
V(ArrayLengthSetter) \
V(BoundFunctionLengthGetter) \
V(BoundFunctionNameGetter) \
+ V(CodeGenerationFromStringsCallbacks) \
V(CompileAnalyse) \
V(CompileBackgroundAnalyse) \
V(CompileBackgroundCompileTask) \
@@ -941,7 +942,6 @@ class RuntimeCallTimer final {
V(Invoke) \
V(InvokeApiFunction) \
V(InvokeApiInterruptCallbacks) \
- V(InvokeFunctionCallback) \
V(JS_Execution) \
V(Map_SetPrototype) \
V(Map_TransitionToAccessorProperty) \
@@ -1140,7 +1140,7 @@ class WorkerThreadRuntimeCallStats final {
// when it is destroyed.
class WorkerThreadRuntimeCallStatsScope final {
public:
- WorkerThreadRuntimeCallStatsScope(
+ explicit WorkerThreadRuntimeCallStatsScope(
WorkerThreadRuntimeCallStats* off_thread_stats);
~WorkerThreadRuntimeCallStatsScope();
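The added `explicit` above prevents a raw WorkerThreadRuntimeCallStats* from silently converting into a scope object. A small illustrative sketch (not part of the diff):

// Illustrative sketch only (not part of the diff).
void Example(v8::internal::WorkerThreadRuntimeCallStats* stats) {
  // OK: direct initialization still works.
  v8::internal::WorkerThreadRuntimeCallStatsScope scope(stats);

  // No longer compiles once the constructor is explicit:
  // v8::internal::WorkerThreadRuntimeCallStatsScope scope2 = stats;
}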
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 9f8cf82d36..ecf4de6767 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -1123,10 +1123,10 @@ void Logger::SetterCallbackEvent(Name name, Address entry_point) {
namespace {
-void AppendCodeCreateHeader(Log::MessageBuilder& msg,
- CodeEventListener::LogEventsAndTags tag,
- AbstractCode::Kind kind, uint8_t* address, int size,
- base::ElapsedTimer* timer) {
+void AppendCodeCreateHeader(
+ Log::MessageBuilder& msg, // NOLINT(runtime/references)
+ CodeEventListener::LogEventsAndTags tag, AbstractCode::Kind kind,
+ uint8_t* address, int size, base::ElapsedTimer* timer) {
msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
<< Logger::kNext << kLogEventsNames[tag] << Logger::kNext << kind
<< Logger::kNext << timer->Elapsed().InMicroseconds() << Logger::kNext
@@ -1134,9 +1134,10 @@ void AppendCodeCreateHeader(Log::MessageBuilder& msg,
<< Logger::kNext;
}
-void AppendCodeCreateHeader(Log::MessageBuilder& msg,
- CodeEventListener::LogEventsAndTags tag,
- AbstractCode code, base::ElapsedTimer* timer) {
+void AppendCodeCreateHeader(
+ Log::MessageBuilder& msg, // NOLINT(runtime/references)
+ CodeEventListener::LogEventsAndTags tag, AbstractCode code,
+ base::ElapsedTimer* timer) {
AppendCodeCreateHeader(msg, tag, code.kind(),
reinterpret_cast<uint8_t*>(code.InstructionStart()),
code.InstructionSize(), timer);
@@ -1336,8 +1337,9 @@ void Logger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
namespace {
-void CodeLinePosEvent(JitLogger* jit_logger, Address code_start,
- SourcePositionTableIterator& iter) {
+void CodeLinePosEvent(
+ JitLogger* jit_logger, Address code_start,
+ SourcePositionTableIterator& iter) { // NOLINT(runtime/references)
if (jit_logger) {
void* jit_handler_data = jit_logger->StartCodePosInfoEvent();
for (; !iter.done(); iter.Advance()) {
@@ -1415,9 +1417,10 @@ void Logger::SuspectReadEvent(Name name, Object obj) {
}
namespace {
-void AppendFunctionMessage(Log::MessageBuilder& msg, const char* reason,
- int script_id, double time_delta, int start_position,
- int end_position, base::ElapsedTimer* timer) {
+void AppendFunctionMessage(
+ Log::MessageBuilder& msg, // NOLINT(runtime/references)
+ const char* reason, int script_id, double time_delta, int start_position,
+ int end_position, base::ElapsedTimer* timer) {
msg << "function" << Logger::kNext << reason << Logger::kNext << script_id
<< Logger::kNext << start_position << Logger::kNext << end_position
<< Logger::kNext << time_delta << Logger::kNext
@@ -1665,14 +1668,14 @@ static void AddFunctionAndCode(SharedFunctionInfo sfi, AbstractCode code_object,
static int EnumerateCompiledFunctions(Heap* heap,
Handle<SharedFunctionInfo>* sfis,
Handle<AbstractCode>* code_objects) {
- HeapIterator iterator(heap);
+ HeapObjectIterator iterator(heap);
DisallowHeapAllocation no_gc;
int compiled_funcs_count = 0;
// Iterate the heap to find shared function info objects and record
// the unoptimized code for them.
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsSharedFunctionInfo()) {
SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
if (sfi.is_compiled() && (!sfi.script().IsScript() ||
@@ -1706,12 +1709,12 @@ static int EnumerateCompiledFunctions(Heap* heap,
static int EnumerateWasmModuleObjects(
Heap* heap, Handle<WasmModuleObject>* module_objects) {
- HeapIterator iterator(heap);
+ HeapObjectIterator iterator(heap);
DisallowHeapAllocation no_gc;
int module_objects_count = 0;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsWasmModuleObject()) {
WasmModuleObject module = WasmModuleObject::cast(obj);
if (module_objects != nullptr) {
@@ -1741,10 +1744,10 @@ void Logger::LogCompiledFunctions() {
void Logger::LogAccessorCallbacks() {
Heap* heap = isolate_->heap();
- HeapIterator iterator(heap);
+ HeapObjectIterator iterator(heap);
DisallowHeapAllocation no_gc;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (!obj.IsAccessorInfo()) continue;
AccessorInfo ai = AccessorInfo::cast(obj);
if (!ai.name().IsName()) continue;
@@ -1769,9 +1772,9 @@ void Logger::LogAccessorCallbacks() {
void Logger::LogAllMaps() {
DisallowHeapAllocation no_gc;
Heap* heap = isolate_->heap();
- HeapIterator iterator(heap);
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(heap);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (!obj.IsMap()) continue;
Map map = Map::cast(obj);
MapCreate(map);
@@ -1999,10 +2002,10 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
void ExistingCodeLogger::LogCodeObjects() {
Heap* heap = isolate_->heap();
- HeapIterator iterator(heap);
+ HeapObjectIterator iterator(heap);
DisallowHeapAllocation no_gc;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsCode()) LogCodeObject(obj);
if (obj.IsBytecodeArray()) LogCodeObject(obj);
}
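The log.cc hunks above all apply the same mechanical rename: HeapIterator becomes HeapObjectIterator and its next() becomes Next(). Distilled into one illustrative sketch (not part of the diff; VisitAllMaps is a hypothetical name):

// Illustrative sketch only (not part of the diff).
void VisitAllMaps(v8::internal::Heap* heap) {
  v8::internal::HeapObjectIterator iterator(heap);
  v8::internal::DisallowHeapAllocation no_gc;
  for (v8::internal::HeapObject obj = iterator.Next(); !obj.is_null();
       obj = iterator.Next()) {
    if (!obj.IsMap()) continue;
    // ... process the Map ...
  }
}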
diff --git a/deps/v8/src/numbers/OWNERS b/deps/v8/src/numbers/OWNERS
index 097b008121..df62d01730 100644
--- a/deps/v8/src/numbers/OWNERS
+++ b/deps/v8/src/numbers/OWNERS
@@ -1,5 +1,7 @@
-ahaas@chromium.org
-bmeurer@chromium.org
clemensh@chromium.org
+jgruber@chromium.org
jkummerow@chromium.org
sigurds@chromium.org
+verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/numbers/conversions.cc b/deps/v8/src/numbers/conversions.cc
index cb424a1ded..faf3e33df3 100644
--- a/deps/v8/src/numbers/conversions.cc
+++ b/deps/v8/src/numbers/conversions.cc
@@ -1245,7 +1245,7 @@ char* DoubleToRadixCString(double value, int radix) {
double delta = 0.5 * (Double(value).NextDouble() - value);
delta = std::max(Double(0.0).NextDouble(), delta);
DCHECK_GT(delta, 0.0);
- if (fraction > delta) {
+ if (fraction >= delta) {
// Insert decimal point.
buffer[fraction_cursor++] = '.';
do {
@@ -1280,7 +1280,7 @@ char* DoubleToRadixCString(double value, int radix) {
break;
}
}
- } while (fraction > delta);
+ } while (fraction >= delta);
}
// Compute integer digits. Fill unrepresented digits with zero.
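The conversions.cc change above relaxes the loop condition from `fraction > delta` to `fraction >= delta`, so a fractional digit is still emitted when the remaining fraction lands exactly on the half-ULP boundary instead of being silently dropped. A simplified illustrative sketch of that loop (not part of the diff; rounding of the final digit is omitted):

// Illustrative sketch only (not part of the diff).
#include <string>

std::string FractionDigits(double fraction, int radix, double delta) {
  std::string out;
  if (fraction >= delta) {
    out.push_back('.');
    do {
      // Shift one digit of the fraction above the decimal point.
      fraction *= radix;
      delta *= radix;
      int digit = static_cast<int>(fraction);
      out.push_back("0123456789abcdefghijklmnopqrstuvwxyz"[digit]);
      fraction -= digit;
      // (Carry/rounding of the last digit is omitted in this sketch.)
    } while (fraction >= delta);
  }
  return out;
}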
diff --git a/deps/v8/src/objects/OWNERS b/deps/v8/src/objects/OWNERS
new file mode 100644
index 0000000000..450423f878
--- /dev/null
+++ b/deps/v8/src/objects/OWNERS
@@ -0,0 +1,3 @@
+file://COMMON_OWNERS
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index 041247637a..c327a35746 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -13,6 +13,7 @@
#include "src/objects/js-objects-inl.h"
#include "src/objects/name.h"
#include "src/objects/templates.h"
+#include "torque-generated/class-definitions-tq-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,12 +24,12 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(AccessCheckInfo, Struct)
OBJECT_CONSTRUCTORS_IMPL(AccessorInfo, Struct)
OBJECT_CONSTRUCTORS_IMPL(InterceptorInfo, Struct)
-OBJECT_CONSTRUCTORS_IMPL(CallHandlerInfo, Tuple3)
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(CallHandlerInfo)
CAST_ACCESSOR(AccessorInfo)
CAST_ACCESSOR(AccessCheckInfo)
CAST_ACCESSOR(InterceptorInfo)
-CAST_ACCESSOR(CallHandlerInfo)
ACCESSORS(AccessorInfo, name, Name, kNameOffset)
SMI_ACCESSORS(AccessorInfo, flags, kFlagsOffset)
@@ -119,9 +120,6 @@ BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, kNonMasking)
BOOL_ACCESSORS(InterceptorInfo, flags, is_named, kNamed)
BOOL_ACCESSORS(InterceptorInfo, flags, has_no_side_effect, kHasNoSideEffect)
-ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
-ACCESSORS(CallHandlerInfo, js_callback, Object, kJsCallbackOffset)
-ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
bool CallHandlerInfo::IsSideEffectFreeCallHandlerInfo() const {
ReadOnlyRoots roots = GetReadOnlyRoots();
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index 1d8b456a8e..518339f7d4 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_API_CALLBACKS_H_
#include "src/objects/struct.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -159,14 +160,9 @@ class InterceptorInfo : public Struct {
OBJECT_CONSTRUCTORS(InterceptorInfo, Struct);
};
-class CallHandlerInfo : public Tuple3 {
+class CallHandlerInfo
+ : public TorqueGeneratedCallHandlerInfo<CallHandlerInfo, Struct> {
public:
- DECL_ACCESSORS(callback, Object)
- DECL_ACCESSORS(js_callback, Object)
- DECL_ACCESSORS(data, Object)
-
- DECL_CAST(CallHandlerInfo)
-
inline bool IsSideEffectFreeCallHandlerInfo() const;
inline bool IsSideEffectCallHandlerInfo() const;
inline void SetNextCallHasNoSideEffect();
@@ -180,11 +176,7 @@ class CallHandlerInfo : public Tuple3 {
Address redirected_callback() const;
- static const int kCallbackOffset = kValue1Offset;
- static const int kJsCallbackOffset = kValue2Offset;
- static const int kDataOffset = kValue3Offset;
-
- OBJECT_CONSTRUCTORS(CallHandlerInfo, Tuple3);
+ TQ_OBJECT_CONSTRUCTORS(CallHandlerInfo)
};
} // namespace internal
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index c2ef59a896..2931c5b0a0 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -29,12 +29,14 @@ CAST_ACCESSOR(JSArgumentsObject)
SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot,
kAliasedContextSlotOffset)
-Context SloppyArgumentsElements::context() {
- return Context::cast(get(kContextIndex));
+DEF_GETTER(SloppyArgumentsElements, context, Context) {
+ return TaggedField<Context>::load(isolate, *this,
+ OffsetOfElementAt(kContextIndex));
}
-FixedArray SloppyArgumentsElements::arguments() {
- return FixedArray::cast(get(kArgumentsIndex));
+DEF_GETTER(SloppyArgumentsElements, arguments, FixedArray) {
+ return TaggedField<FixedArray>::load(isolate, *this,
+ OffsetOfElementAt(kArgumentsIndex));
}
void SloppyArgumentsElements::set_arguments(FixedArray arguments) {
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index a1d39f1f36..79d2e604bd 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -102,8 +102,8 @@ class SloppyArgumentsElements : public FixedArray {
static const int kArgumentsIndex = 1;
static const uint32_t kParameterMapStart = 2;
- inline Context context();
- inline FixedArray arguments();
+ DECL_GETTER(context, Context)
+ DECL_GETTER(arguments, FixedArray)
inline void set_arguments(FixedArray arguments);
inline uint32_t parameter_map_length();
inline Object get_mapped_entry(uint32_t entry);
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 92b78f8821..b02c0f29d6 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -46,6 +46,8 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static MaybeHandle<BigInt> MakeImmutable(MaybeHandle<MutableBigInt> maybe);
static Handle<BigInt> MakeImmutable(Handle<MutableBigInt> result);
+ static void Canonicalize(MutableBigInt result);
+
// Allocation helpers.
static MaybeHandle<MutableBigInt> New(
Isolate* isolate, int length,
@@ -64,6 +66,10 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
SLOW_DCHECK(bigint->IsBigInt());
return Handle<MutableBigInt>::cast(bigint);
}
+ static MutableBigInt cast(Object o) {
+ SLOW_DCHECK(o.IsBigInt());
+ return MutableBigInt(o.ptr());
+ }
static MutableBigInt unchecked_cast(Object o) {
return MutableBigInt(o.ptr());
}
@@ -87,8 +93,13 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static MaybeHandle<BigInt> AbsoluteAdd(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y, bool result_sign);
+
+ static void AbsoluteAdd(MutableBigInt result, BigInt x, BigInt y);
+
static Handle<BigInt> AbsoluteSub(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y, bool result_sign);
+ static void AbsoluteSub(MutableBigInt result, BigInt x, BigInt y);
+
static MaybeHandle<MutableBigInt> AbsoluteAddOne(
Isolate* isolate, Handle<BigIntBase> x, bool sign,
MutableBigInt result_storage = MutableBigInt());
@@ -120,6 +131,8 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static int AbsoluteCompare(Handle<BigIntBase> x, Handle<BigIntBase> y);
+ static int AbsoluteCompare(BigIntBase x, BigIntBase y);
+
static void MultiplyAccumulate(Handle<BigIntBase> multiplicand,
digit_t multiplier,
Handle<MutableBigInt> accumulator,
@@ -223,11 +236,24 @@ NEVER_READ_ONLY_SPACE_IMPL(MutableBigInt)
#include "src/objects/object-macros-undef.h"
+template <typename T>
+MaybeHandle<T> ThrowBigIntTooBig(Isolate* isolate) {
+ // If the result of a BigInt computation is truncated to 64 bit, Turbofan
+ // can sometimes truncate intermediate results already, which can prevent
+ // those from exceeding the maximum length, effectively preventing a
+ // RangeError from being thrown. As this is a performance optimization, this
+ // behavior is accepted. To prevent the correctness fuzzer from detecting this
+ // difference, we crash the program.
+ if (FLAG_correctness_fuzzer_suppressions) {
+ FATAL("Aborting on invalid BigInt length");
+ }
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig), T);
+}
+
MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length,
AllocationType allocation) {
if (length > BigInt::kMaxLength) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- MutableBigInt);
+ return ThrowBigIntTooBig<MutableBigInt>(isolate);
}
Handle<MutableBigInt> result =
Cast(isolate->factory()->NewBigInt(length, allocation));
@@ -347,32 +373,36 @@ MaybeHandle<BigInt> MutableBigInt::MakeImmutable(
}
Handle<BigInt> MutableBigInt::MakeImmutable(Handle<MutableBigInt> result) {
+ MutableBigInt::Canonicalize(*result);
+ return Handle<BigInt>::cast(result);
+}
+
+void MutableBigInt::Canonicalize(MutableBigInt result) {
// Check if we need to right-trim any leading zero-digits.
- int old_length = result->length();
+ int old_length = result.length();
int new_length = old_length;
- while (new_length > 0 && result->digit(new_length - 1) == 0) new_length--;
+ while (new_length > 0 && result.digit(new_length - 1) == 0) new_length--;
int to_trim = old_length - new_length;
if (to_trim != 0) {
- int size_delta = to_trim * kDigitSize;
- Address new_end = result->address() + BigInt::SizeFor(new_length);
- Heap* heap = result->GetHeap();
- if (!heap->IsLargeObject(*result)) {
+ int size_delta = to_trim * MutableBigInt::kDigitSize;
+ Address new_end = result.address() + BigInt::SizeFor(new_length);
+ Heap* heap = result.GetHeap();
+ if (!heap->IsLargeObject(result)) {
// We do not create a filler for objects in large object space.
// TODO(hpayer): We should shrink the large object page if the size
// of the object changed significantly.
heap->CreateFillerObjectAt(new_end, size_delta, ClearRecordedSlots::kNo);
}
- result->synchronized_set_length(new_length);
+ result.synchronized_set_length(new_length);
// Canonicalize -0n.
if (new_length == 0) {
- result->set_sign(false);
+ result.set_sign(false);
// TODO(jkummerow): If we cache a canonical 0n, return that here.
}
}
- DCHECK_IMPLIES(result->length() > 0,
- result->digit(result->length() - 1) != 0); // MSD is non-zero.
- return Handle<BigInt>(result.location());
+ DCHECK_IMPLIES(result.length() > 0,
+ result.digit(result.length() - 1) != 0); // MSD is non-zero.
}
Handle<BigInt> BigInt::Zero(Isolate* isolate) {
@@ -428,14 +458,12 @@ MaybeHandle<BigInt> BigInt::Exponentiate(Isolate* isolate, Handle<BigInt> base,
// results.
STATIC_ASSERT(kMaxLengthBits < std::numeric_limits<digit_t>::max());
if (exponent->length() > 1) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
digit_t exp_value = exponent->digit(0);
if (exp_value == 1) return base;
if (exp_value >= kMaxLengthBits) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
STATIC_ASSERT(kMaxLengthBits <= kMaxInt);
int n = static_cast<int>(exp_value);
@@ -1130,6 +1158,26 @@ void BigInt::BigIntShortPrint(std::ostream& os) {
// Internal helpers.
+void MutableBigInt::AbsoluteAdd(MutableBigInt result, BigInt x, BigInt y) {
+ DisallowHeapAllocation no_gc;
+ digit_t carry = 0;
+ int i = 0;
+ for (; i < y.length(); i++) {
+ digit_t new_carry = 0;
+ digit_t sum = digit_add(x.digit(i), y.digit(i), &new_carry);
+ sum = digit_add(sum, carry, &new_carry);
+ result.set_digit(i, sum);
+ carry = new_carry;
+ }
+ for (; i < x.length(); i++) {
+ digit_t new_carry = 0;
+ digit_t sum = digit_add(x.digit(i), carry, &new_carry);
+ result.set_digit(i, sum);
+ carry = new_carry;
+ }
+ result.set_digit(i, carry);
+}
+
MaybeHandle<BigInt> MutableBigInt::AbsoluteAdd(Isolate* isolate,
Handle<BigInt> x,
Handle<BigInt> y,
@@ -1146,22 +1194,9 @@ MaybeHandle<BigInt> MutableBigInt::AbsoluteAdd(Isolate* isolate,
if (!New(isolate, x->length() + 1).ToHandle(&result)) {
return MaybeHandle<BigInt>();
}
- digit_t carry = 0;
- int i = 0;
- for (; i < y->length(); i++) {
- digit_t new_carry = 0;
- digit_t sum = digit_add(x->digit(i), y->digit(i), &new_carry);
- sum = digit_add(sum, carry, &new_carry);
- result->set_digit(i, sum);
- carry = new_carry;
- }
- for (; i < x->length(); i++) {
- digit_t new_carry = 0;
- digit_t sum = digit_add(x->digit(i), carry, &new_carry);
- result->set_digit(i, sum);
- carry = new_carry;
- }
- result->set_digit(i, carry);
+
+ AbsoluteAdd(*result, *x, *y);
+
result->set_sign(result_sign);
return MakeImmutable(result);
}
@@ -1178,24 +1213,31 @@ Handle<BigInt> MutableBigInt::AbsoluteSub(Isolate* isolate, Handle<BigInt> x,
return result_sign == x->sign() ? x : BigInt::UnaryMinus(isolate, x);
}
Handle<MutableBigInt> result = New(isolate, x->length()).ToHandleChecked();
+
+ AbsoluteSub(*result, *x, *y);
+
+ result->set_sign(result_sign);
+ return MakeImmutable(result);
+}
+
+void MutableBigInt::AbsoluteSub(MutableBigInt result, BigInt x, BigInt y) {
+ DisallowHeapAllocation no_gc;
digit_t borrow = 0;
int i = 0;
- for (; i < y->length(); i++) {
+ for (; i < y.length(); i++) {
digit_t new_borrow = 0;
- digit_t difference = digit_sub(x->digit(i), y->digit(i), &new_borrow);
+ digit_t difference = digit_sub(x.digit(i), y.digit(i), &new_borrow);
difference = digit_sub(difference, borrow, &new_borrow);
- result->set_digit(i, difference);
+ result.set_digit(i, difference);
borrow = new_borrow;
}
- for (; i < x->length(); i++) {
+ for (; i < x.length(); i++) {
digit_t new_borrow = 0;
- digit_t difference = digit_sub(x->digit(i), borrow, &new_borrow);
- result->set_digit(i, difference);
+ digit_t difference = digit_sub(x.digit(i), borrow, &new_borrow);
+ result.set_digit(i, difference);
borrow = new_borrow;
}
DCHECK_EQ(0, borrow);
- result->set_sign(result_sign);
- return MakeImmutable(result);
}
// Adds 1 to the absolute value of {x} and sets the result's sign to {sign}.
@@ -1375,12 +1417,17 @@ Handle<MutableBigInt> MutableBigInt::AbsoluteXor(Isolate* isolate,
// Returns a positive value if abs(x) > abs(y), a negative value if
// abs(x) < abs(y), or zero if abs(x) == abs(y).
int MutableBigInt::AbsoluteCompare(Handle<BigIntBase> x, Handle<BigIntBase> y) {
- int diff = x->length() - y->length();
+ return MutableBigInt::AbsoluteCompare(*x, *y);
+}
+
+int MutableBigInt::AbsoluteCompare(BigIntBase x, BigIntBase y) {
+ DisallowHeapAllocation no_gc;
+ int diff = x.length() - y.length();
if (diff != 0) return diff;
- int i = x->length() - 1;
- while (i >= 0 && x->digit(i) == y->digit(i)) i--;
+ int i = x.length() - 1;
+ while (i >= 0 && x.digit(i) == y.digit(i)) i--;
if (i < 0) return 0;
- return x->digit(i) > y->digit(i) ? 1 : -1;
+ return x.digit(i) > y.digit(i) ? 1 : -1;
}
// Multiplies {multiplicand} with {multiplier} and adds the result to
@@ -1716,8 +1763,7 @@ MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
Handle<BigIntBase> y) {
Maybe<digit_t> maybe_shift = ToShiftAmount(y);
if (maybe_shift.IsNothing()) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
digit_t shift = maybe_shift.FromJust();
int digit_shift = static_cast<int>(shift / kDigitBits);
@@ -1727,8 +1773,7 @@ MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
(x->digit(length - 1) >> (kDigitBits - bits_shift)) != 0;
int result_length = length + digit_shift + grow;
if (result_length > kMaxLength) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
Handle<MutableBigInt> result;
if (!New(isolate, result_length).ToHandle(&result)) {
@@ -1887,8 +1932,7 @@ MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
}
// All the overflow/maximum checks above fall through to here.
if (should_throw == kThrowOnError) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- FreshlyAllocatedBigInt);
+ return ThrowBigIntTooBig<FreshlyAllocatedBigInt>(isolate);
} else {
return MaybeHandle<FreshlyAllocatedBigInt>();
}
@@ -2155,10 +2199,6 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
// the raw characters pointer (as the string might have moved).
chars = result->GetChars(no_gc);
}
- if (interrupt_check.InterruptRequested() &&
- isolate->stack_guard()->HandleInterrupts().IsException(isolate)) {
- return MaybeHandle<String>();
- }
}
} while (nonzero_digit > 0);
last_digit = rest->digit(0);
@@ -2250,8 +2290,7 @@ MaybeHandle<BigInt> BigInt::AsUintN(Isolate* isolate, uint64_t n,
// If {x} is negative, simulate two's complement representation.
if (x->sign()) {
if (n > kMaxLengthBits) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
return MutableBigInt::TruncateAndSubFromPowerOfTwo(
isolate, static_cast<int>(n), x, false);
@@ -2395,8 +2434,7 @@ MaybeHandle<BigInt> BigInt::FromWords64(Isolate* isolate, int sign_bit,
int words64_count,
const uint64_t* words) {
if (words64_count < 0 || words64_count > kMaxLength / (64 / kDigitBits)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
if (words64_count == 0) return MutableBigInt::Zero(isolate);
STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
@@ -2674,5 +2712,32 @@ void BigInt::BigIntPrint(std::ostream& os) {
}
#endif // OBJECT_PRINT
+void MutableBigInt_AbsoluteAddAndCanonicalize(Address result_addr,
+ Address x_addr, Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ MutableBigInt::AbsoluteAdd(result, x, y);
+ MutableBigInt::Canonicalize(result);
+}
+
+int32_t MutableBigInt_AbsoluteCompare(Address x_addr, Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+
+ return MutableBigInt::AbsoluteCompare(x, y);
+}
+
+void MutableBigInt_AbsoluteSubAndCanonicalize(Address result_addr,
+ Address x_addr, Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ MutableBigInt::AbsoluteSub(result, x, y);
+ MutableBigInt::Canonicalize(result);
+}
+
} // namespace internal
} // namespace v8
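The new Address-based entry points above (MutableBigInt_AbsoluteAddAndCanonicalize and friends) wrap the no-allocation AbsoluteAdd/AbsoluteSub/AbsoluteCompare overloads so generated code can call them on raw tagged pointers. The carry propagation behind AbsoluteAdd, written over plain digit arrays as an illustrative sketch (not part of the diff; AbsoluteAddDigits is a hypothetical standalone name):

// Illustrative sketch only (not part of the diff). Assumes x has at least as
// many digits as y and that |result| has room for x_len + 1 digits, mirroring
// the preconditions in bigint.cc.
#include <cstdint>

using digit_t = uintptr_t;

void AbsoluteAddDigits(const digit_t* x, int x_len, const digit_t* y, int y_len,
                       digit_t* result) {
  digit_t carry = 0;
  int i = 0;
  for (; i < y_len; i++) {
    digit_t sum = x[i] + y[i];
    digit_t new_carry = sum < x[i] ? 1 : 0;  // Wrap-around check for x + y.
    sum += carry;
    if (sum < carry) new_carry = 1;  // Wrap-around check for adding the carry.
    result[i] = sum;
    carry = new_carry;
  }
  for (; i < x_len; i++) {
    digit_t sum = x[i] + carry;
    carry = sum < carry ? 1 : 0;
    result[i] = sum;
  }
  result[i] = carry;  // Top digit; canonicalization trims it if it is zero.
}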
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 3f5d35878b..a5ca514867 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -16,6 +16,12 @@
namespace v8 {
namespace internal {
+void MutableBigInt_AbsoluteAddAndCanonicalize(Address result_addr,
+ Address x_addr, Address y_addr);
+int32_t MutableBigInt_AbsoluteCompare(Address x_addr, Address y_addr);
+void MutableBigInt_AbsoluteSubAndCanonicalize(Address result_addr,
+ Address x_addr, Address y_addr);
+
class BigInt;
class ValueDeserializer;
class ValueSerializer;
@@ -66,6 +72,10 @@ class BigIntBase : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, BIGINT_FIELDS)
#undef BIGINT_FIELDS
+ static constexpr bool HasOptionalPadding() {
+ return FIELD_SIZE(kOptionalPaddingOffset) > 0;
+ }
+
private:
friend class ::v8::internal::BigInt; // MSVC wants full namespace.
friend class MutableBigInt;
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 0877746d11..e6f00b0fb2 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -7,8 +7,8 @@
#include "src/objects/code.h"
+#include "src/base/memory.h"
#include "src/codegen/code-desc.h"
-#include "src/common/v8memory.h"
#include "src/execution/isolate.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/dictionary.h"
@@ -29,7 +29,7 @@ OBJECT_CONSTRUCTORS_IMPL(BytecodeArray, FixedArrayBase)
OBJECT_CONSTRUCTORS_IMPL(AbstractCode, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakFixedArray)
OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache)
NEVER_READ_ONLY_SPACE_IMPL(AbstractCode)
@@ -39,12 +39,6 @@ CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeDataContainer)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DeoptimizationData)
-CAST_ACCESSOR(SourcePositionTableWithFrameCache)
-
-ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
- kSourcePositionTableOffset)
-ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
- SimpleNumberDictionary, kStackFrameCacheOffset)
int AbstractCode::raw_instruction_size() {
if (IsCode()) {
@@ -331,7 +325,9 @@ int Code::SizeIncludingMetadata() const {
}
ByteArray Code::unchecked_relocation_info() const {
- return ByteArray::unchecked_cast(READ_FIELD(*this, kRelocationInfoOffset));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return ByteArray::unchecked_cast(
+ TaggedField<HeapObject, kRelocationInfoOffset>::load(isolate, *this));
}
byte* Code::relocation_start() const {
@@ -575,7 +571,7 @@ Code Code::GetCodeFromTargetAddress(Address address) {
}
Code Code::GetObjectFromEntryAddress(Address location_of_address) {
- Address code_entry = Memory<Address>(location_of_address);
+ Address code_entry = base::Memory<Address>(location_of_address);
HeapObject code = HeapObject::FromAddress(code_entry - Code::kHeaderSize);
// Unchecked cast because we can't rely on the map currently
// not being a forwarding pointer.
@@ -622,32 +618,32 @@ void BytecodeArray::set(int index, byte value) {
WriteField<byte>(kHeaderSize + index * kCharSize, value);
}
-void BytecodeArray::set_frame_size(int frame_size) {
+void BytecodeArray::set_frame_size(int32_t frame_size) {
DCHECK_GE(frame_size, 0);
DCHECK(IsAligned(frame_size, kSystemPointerSize));
- WriteField<int>(kFrameSizeOffset, frame_size);
+ WriteField<int32_t>(kFrameSizeOffset, frame_size);
}
-int BytecodeArray::frame_size() const {
- return ReadField<int>(kFrameSizeOffset);
+int32_t BytecodeArray::frame_size() const {
+ return ReadField<int32_t>(kFrameSizeOffset);
}
int BytecodeArray::register_count() const {
- return frame_size() / kSystemPointerSize;
+ return static_cast<int>(frame_size()) / kSystemPointerSize;
}
-void BytecodeArray::set_parameter_count(int number_of_parameters) {
+void BytecodeArray::set_parameter_count(int32_t number_of_parameters) {
DCHECK_GE(number_of_parameters, 0);
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- WriteField<int>(kParameterSizeOffset,
+ WriteField<int32_t>(kParameterSizeOffset,
(number_of_parameters << kSystemPointerSizeLog2));
}
interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
const {
- int register_operand =
- ReadField<int>(kIncomingNewTargetOrGeneratorRegisterOffset);
+ int32_t register_operand =
+ ReadField<int32_t>(kIncomingNewTargetOrGeneratorRegisterOffset);
if (register_operand == 0) {
return interpreter::Register::invalid_value();
} else {
@@ -658,24 +654,24 @@ interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
void BytecodeArray::set_incoming_new_target_or_generator_register(
interpreter::Register incoming_new_target_or_generator_register) {
if (!incoming_new_target_or_generator_register.is_valid()) {
- WriteField<int>(kIncomingNewTargetOrGeneratorRegisterOffset, 0);
+ WriteField<int32_t>(kIncomingNewTargetOrGeneratorRegisterOffset, 0);
} else {
DCHECK(incoming_new_target_or_generator_register.index() <
register_count());
DCHECK_NE(0, incoming_new_target_or_generator_register.ToOperand());
- WriteField<int>(kIncomingNewTargetOrGeneratorRegisterOffset,
+ WriteField<int32_t>(kIncomingNewTargetOrGeneratorRegisterOffset,
incoming_new_target_or_generator_register.ToOperand());
}
}
int BytecodeArray::osr_loop_nesting_level() const {
- return ReadField<int8_t>(kOSRNestingLevelOffset);
+ return ReadField<int8_t>(kOsrNestingLevelOffset);
}
void BytecodeArray::set_osr_loop_nesting_level(int depth) {
DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
- WriteField<int8_t>(kOSRNestingLevelOffset, depth);
+ WriteField<int8_t>(kOsrNestingLevelOffset, depth);
}
BytecodeArray::Age BytecodeArray::bytecode_age() const {
@@ -691,10 +687,10 @@ void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
RELAXED_WRITE_INT8_FIELD(*this, kBytecodeAgeOffset, static_cast<int8_t>(age));
}
-int BytecodeArray::parameter_count() const {
+int32_t BytecodeArray::parameter_count() const {
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- return ReadField<int>(kParameterSizeOffset) >> kSystemPointerSizeLog2;
+ return ReadField<int32_t>(kParameterSizeOffset) >> kSystemPointerSizeLog2;
}
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
@@ -745,7 +741,9 @@ ByteArray BytecodeArray::SourcePositionTableIfCollected() const {
void BytecodeArray::ClearFrameCacheFromSourcePositionTable() {
Object maybe_table = source_position_table();
- if (maybe_table.IsUndefined() || maybe_table.IsByteArray()) return;
+ if (maybe_table.IsUndefined() || maybe_table.IsByteArray() ||
+ maybe_table.IsException())
+ return;
DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
set_source_position_table(SourcePositionTableWithFrameCache::cast(maybe_table)
.source_position_table());
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index 89180693a5..a51a8c5b79 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -352,7 +352,8 @@ bool Code::Inlines(SharedFunctionInfo sfi) {
Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
isolate_ = isolate;
Object list = isolate->heap()->native_contexts_list();
- next_context_ = list.IsUndefined(isolate_) ? Context() : Context::cast(list);
+ next_context_ =
+ list.IsUndefined(isolate_) ? NativeContext() : NativeContext::cast(list);
}
Code Code::OptimizedCodeIterator::Next() {
@@ -366,8 +367,8 @@ Code Code::OptimizedCodeIterator::Next() {
next = next_context_.OptimizedCodeListHead();
Object next_context = next_context_.next_context_link();
next_context_ = next_context.IsUndefined(isolate_)
- ? Context()
- : Context::cast(next_context);
+ ? NativeContext()
+ : NativeContext::cast(next_context);
} else {
// Exhausted contexts.
return Code();
@@ -830,7 +831,8 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << reinterpret_cast<const void*>(current_address) << " @ "
<< std::setw(4) << iterator.current_offset() << " : ";
interpreter::BytecodeDecoder::Decode(
- os, reinterpret_cast<byte*>(current_address), parameter_count());
+ os, reinterpret_cast<byte*>(current_address),
+ static_cast<int>(parameter_count()));
if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
Address jump_target = base_address + iterator.GetJumpTargetOffset();
os << " (" << reinterpret_cast<void*>(jump_target) << " @ "
@@ -856,7 +858,7 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << "Constant pool (size = " << constant_pool().length() << ")\n";
#ifdef OBJECT_PRINT
if (constant_pool().length() > 0) {
- constant_pool().Print();
+ constant_pool().Print(os);
}
#endif
@@ -1084,5 +1086,15 @@ const char* DependentCode::DependencyGroupName(DependencyGroup group) {
UNREACHABLE();
}
+bool BytecodeArray::IsBytecodeEqual(const BytecodeArray other) const {
+ if (length() != other.length()) return false;
+
+ for (int i = 0; i < length(); ++i) {
+ if (get(i) != other.get(i)) return false;
+ }
+
+ return true;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index a950261103..2f85d4ac7b 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -476,7 +476,7 @@ class Code::OptimizedCodeIterator {
Code Next();
private:
- Context next_context_;
+ NativeContext next_context_;
Code current_code_;
Isolate* isolate_;
@@ -741,15 +741,15 @@ class BytecodeArray : public FixedArrayBase {
inline Address GetFirstBytecodeAddress();
// Accessors for frame size.
- inline int frame_size() const;
- inline void set_frame_size(int frame_size);
+ inline int32_t frame_size() const;
+ inline void set_frame_size(int32_t frame_size);
// Accessor for register count (derived from frame_size).
inline int register_count() const;
// Accessors for parameter count (including implicit 'this' receiver).
- inline int parameter_count() const;
- inline void set_parameter_count(int number_of_parameters);
+ inline int32_t parameter_count() const;
+ inline void set_parameter_count(int32_t number_of_parameters);
// Register used to pass the incoming new.target or generator object from the
  // function call.
@@ -828,28 +828,15 @@ class BytecodeArray : public FixedArrayBase {
// Compares only the bytecode array but not any of the header fields.
bool IsBytecodeEqual(const BytecodeArray other) const;
-// Layout description.
-#define BYTECODE_ARRAY_FIELDS(V) \
- /* Pointer fields. */ \
- V(kConstantPoolOffset, kTaggedSize) \
- V(kHandlerTableOffset, kTaggedSize) \
- V(kSourcePositionTableOffset, kTaggedSize) \
- V(kFrameSizeOffset, kIntSize) \
- V(kParameterSizeOffset, kIntSize) \
- V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \
- V(kOSRNestingLevelOffset, kCharSize) \
- V(kBytecodeAgeOffset, kCharSize) \
- /* Total size. */ \
- V(kHeaderSize, 0)
-
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
- BYTECODE_ARRAY_FIELDS)
-#undef BYTECODE_ARRAY_FIELDS
+ TORQUE_GENERATED_BYTECODE_ARRAY_FIELDS)
+ static constexpr int kHeaderSize = kSize;
// InterpreterEntryTrampoline expects these fields to be next to each other
// and writes a 16-bit value to reset them.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- kOSRNestingLevelOffset + kCharSize);
+ kOsrNestingLevelOffset + kCharSize);
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
@@ -948,22 +935,11 @@ class DeoptimizationData : public FixedArray {
OBJECT_CONSTRUCTORS(DeoptimizationData, FixedArray);
};
-class SourcePositionTableWithFrameCache : public Struct {
+class SourcePositionTableWithFrameCache
+ : public TorqueGeneratedSourcePositionTableWithFrameCache<
+ SourcePositionTableWithFrameCache, Struct> {
public:
- DECL_ACCESSORS(source_position_table, ByteArray)
- DECL_ACCESSORS(stack_frame_cache, SimpleNumberDictionary)
-
- DECL_CAST(SourcePositionTableWithFrameCache)
-
- DECL_PRINTER(SourcePositionTableWithFrameCache)
- DECL_VERIFIER(SourcePositionTableWithFrameCache)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- Struct::kHeaderSize,
- TORQUE_GENERATED_SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_FIELDS)
-
- OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache, Struct);
+ TQ_OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache)
};
} // namespace internal
diff --git a/deps/v8/src/objects/compressed-slots-inl.h b/deps/v8/src/objects/compressed-slots-inl.h
index b08bc938e5..a93814fee2 100644
--- a/deps/v8/src/objects/compressed-slots-inl.h
+++ b/deps/v8/src/objects/compressed-slots-inl.h
@@ -22,6 +22,12 @@ namespace internal {
CompressedObjectSlot::CompressedObjectSlot(Object* object)
: SlotBase(reinterpret_cast<Address>(&object->ptr_)) {}
+bool CompressedObjectSlot::contains_value(Address raw_value) const {
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
+ return static_cast<uint32_t>(value) ==
+ static_cast<uint32_t>(static_cast<Tagged_t>(raw_value));
+}
+
Object CompressedObjectSlot::operator*() const {
Tagged_t value = *location();
return Object(DecompressTaggedAny(address(), value));
@@ -61,54 +67,6 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
}
//
-// CompressedMapWordSlot implementation.
-//
-
-bool CompressedMapWordSlot::contains_value(Address raw_value) const {
- AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return static_cast<uint32_t>(value) ==
- static_cast<uint32_t>(static_cast<Tagged_t>(raw_value));
-}
-
-Object CompressedMapWordSlot::operator*() const {
- Tagged_t value = *location();
- return Object(DecompressTaggedPointer(address(), value));
-}
-
-void CompressedMapWordSlot::store(Object value) const {
- *location() = CompressTagged(value.ptr());
-}
-
-Object CompressedMapWordSlot::Relaxed_Load() const {
- AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return Object(DecompressTaggedPointer(address(), value));
-}
-
-void CompressedMapWordSlot::Relaxed_Store(Object value) const {
- Tagged_t ptr = CompressTagged(value.ptr());
- AsAtomicTagged::Relaxed_Store(location(), ptr);
-}
-
-Object CompressedMapWordSlot::Acquire_Load() const {
- AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
- return Object(DecompressTaggedPointer(address(), value));
-}
-
-void CompressedMapWordSlot::Release_Store(Object value) const {
- Tagged_t ptr = CompressTagged(value.ptr());
- AsAtomicTagged::Release_Store(location(), ptr);
-}
-
-Object CompressedMapWordSlot::Release_CompareAndSwap(Object old,
- Object target) const {
- Tagged_t old_ptr = CompressTagged(old.ptr());
- Tagged_t target_ptr = CompressTagged(target.ptr());
- Tagged_t result =
- AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
- return Object(DecompressTaggedPointer(address(), result));
-}
-
-//
// CompressedMaybeObjectSlot implementation.
//
diff --git a/deps/v8/src/objects/compressed-slots.h b/deps/v8/src/objects/compressed-slots.h
index 45df733caf..07660b1961 100644
--- a/deps/v8/src/objects/compressed-slots.h
+++ b/deps/v8/src/objects/compressed-slots.h
@@ -34,32 +34,6 @@ class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
explicit CompressedObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
: SlotBase(slot.address()) {}
- inline Object operator*() const;
- inline void store(Object value) const;
-
- inline Object Acquire_Load() const;
- inline Object Relaxed_Load() const;
- inline void Relaxed_Store(Object value) const;
- inline void Release_Store(Object value) const;
- inline Object Release_CompareAndSwap(Object old, Object target) const;
-};
-
-// A CompressedMapWordSlot instance describes a kTaggedSize-sized map-word field
-// ("slot") of heap objects holding a compressed tagged pointer or a Smi
-// representing forwaring pointer value.
-// This slot kind is similar to CompressedObjectSlot but decompression of
-// forwarding pointer is different.
-// Its address() is the address of the slot.
-// The slot's contents can be read and written using operator* and store().
-class CompressedMapWordSlot : public SlotBase<CompressedMapWordSlot, Tagged_t> {
- public:
- using TObject = Object;
-
- static constexpr bool kCanBeWeak = false;
-
- CompressedMapWordSlot() : SlotBase(kNullAddress) {}
- explicit CompressedMapWordSlot(Address ptr) : SlotBase(ptr) {}
-
// Compares memory representation of a value stored in the slot with given
// raw value without decompression.
inline bool contains_value(Address raw_value) const;
@@ -67,10 +41,9 @@ class CompressedMapWordSlot : public SlotBase<CompressedMapWordSlot, Tagged_t> {
inline Object operator*() const;
inline void store(Object value) const;
+ inline Object Acquire_Load() const;
inline Object Relaxed_Load() const;
inline void Relaxed_Store(Object value) const;
-
- inline Object Acquire_Load() const;
inline void Release_Store(Object value) const;
inline Object Release_CompareAndSwap(Object old, Object target) const;
};
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index bb861a1d1e..0c566dd081 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -52,9 +52,15 @@ SMI_ACCESSORS(Context, length, kLengthOffset)
CAST_ACCESSOR(NativeContext)
Object Context::get(int index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return get(isolate, index);
+}
+
+Object Context::get(Isolate* isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index),
static_cast<unsigned>(this->length()));
- return RELAXED_READ_FIELD(*this, OffsetOfElementAt(index));
+ return TaggedField<Object>::Relaxed_Load(isolate, *this,
+ OffsetOfElementAt(index));
}
void Context::set(int index, Object value) {
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
index cddbcb98c0..861e06d87f 100644
--- a/deps/v8/src/objects/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -44,7 +44,7 @@ bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table,
DCHECK(context.IsScriptContext());
int slot_index = ScopeInfo::ContextSlotIndex(
context.scope_info(), name, &result->mode, &result->init_flag,
- &result->maybe_assigned_flag);
+ &result->maybe_assigned_flag, &result->requires_brand_check);
if (slot_index >= 0) {
result->context_index = i;
@@ -105,12 +105,12 @@ ScopeInfo Context::scope_info() {
return ScopeInfo::cast(get(SCOPE_INFO_INDEX));
}
-Module Context::module() {
+SourceTextModule Context::module() {
Context current = *this;
while (!current.IsModuleContext()) {
current = current.previous();
}
- return Module::cast(current.extension());
+ return SourceTextModule::cast(current.extension());
}
JSGlobalObject Context::global_object() {
@@ -287,8 +287,10 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
+ RequiresBrandCheckFlag requires_brand_check;
int slot_index = ScopeInfo::ContextSlotIndex(scope_info, *name, &mode,
- &flag, &maybe_assigned_flag);
+ &flag, &maybe_assigned_flag,
+ &requires_brand_check);
DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) {
if (FLAG_trace_contexts) {
@@ -338,8 +340,8 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
*index = cell_index;
*variable_mode = mode;
*init_flag = flag;
- *attributes = ModuleDescriptor::GetCellIndexKind(cell_index) ==
- ModuleDescriptor::kExport
+ *attributes = SourceTextModuleDescriptor::GetCellIndexKind(
+ cell_index) == SourceTextModuleDescriptor::kExport
? GetAttributesForMode(mode)
: READ_ONLY;
return handle(context->module(), isolate);
@@ -394,31 +396,26 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
return Handle<Object>::null();
}
-void Context::AddOptimizedCode(Code code) {
- DCHECK(IsNativeContext());
+void NativeContext::AddOptimizedCode(Code code) {
DCHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(code.next_code_link().IsUndefined());
code.set_next_code_link(get(OPTIMIZED_CODE_LIST));
set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER);
}
-void Context::SetOptimizedCodeListHead(Object head) {
- DCHECK(IsNativeContext());
+void NativeContext::SetOptimizedCodeListHead(Object head) {
set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
}
-Object Context::OptimizedCodeListHead() {
- DCHECK(IsNativeContext());
+Object NativeContext::OptimizedCodeListHead() {
return get(OPTIMIZED_CODE_LIST);
}
-void Context::SetDeoptimizedCodeListHead(Object head) {
- DCHECK(IsNativeContext());
+void NativeContext::SetDeoptimizedCodeListHead(Object head) {
set(DEOPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
}
-Object Context::DeoptimizedCodeListHead() {
- DCHECK(IsNativeContext());
+Object NativeContext::DeoptimizedCodeListHead() {
return get(DEOPTIMIZED_CODE_LIST);
}
@@ -474,19 +471,14 @@ bool Context::IsBootstrappingOrValidParentContext(Object object,
#endif
-void Context::ResetErrorsThrown() {
- DCHECK(IsNativeContext());
- set_errors_thrown(Smi::FromInt(0));
-}
-
-void Context::IncrementErrorsThrown() {
- DCHECK(IsNativeContext());
+void NativeContext::ResetErrorsThrown() { set_errors_thrown(Smi::FromInt(0)); }
+void NativeContext::IncrementErrorsThrown() {
int previous_value = errors_thrown().value();
set_errors_thrown(Smi::FromInt(previous_value + 1));
}
-int Context::GetErrorsThrown() { return errors_thrown().value(); }
+int NativeContext::GetErrorsThrown() { return errors_thrown().value(); }
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
STATIC_ASSERT(NativeContext::kScopeInfoOffset ==
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index d83e351550..0c00aba08e 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -38,9 +38,6 @@ enum ContextLookupFlags {
// Factory::NewContext.
#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
- async_function_promise_create) \
- V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
V(MAKE_ERROR_INDEX, JSFunction, make_error) \
V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
@@ -48,20 +45,10 @@ enum ContextLookupFlags {
V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
V(OBJECT_CREATE, JSFunction, object_create) \
- V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
- V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
- V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
- V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
- V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
- V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
- V(OBJECT_KEYS, JSFunction, object_keys) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
- V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \
V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
promise_internal_constructor) \
V(IS_PROMISE_INDEX, JSFunction, is_promise) \
@@ -193,8 +180,10 @@ enum ContextLookupFlags {
V(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_MAP_INDEX, Map, \
js_finalization_group_cleanup_iterator_map) \
V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
- V(JS_WEAK_REF_MAP_INDEX, Map, js_weak_ref_map) \
V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
+ V(JS_WEAK_REF_FUNCTION_INDEX, JSFunction, js_weak_ref_fun) \
+ V(JS_FINALIZATION_GROUP_FUNCTION_INDEX, JSFunction, \
+ js_finalization_group_fun) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(MAP_KEY_ITERATOR_MAP_INDEX, Map, map_key_iterator_map) \
V(MAP_KEY_VALUE_ITERATOR_MAP_INDEX, Map, map_key_value_iterator_map) \
@@ -238,12 +227,14 @@ enum ContextLookupFlags {
V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info) \
+ V(REGEXP_PROTOTYPE_INDEX, JSObject, regexp_prototype) \
V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \
+ V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
+ V(REGEXP_SPECIES_PROTECTOR_INDEX, PropertyCell, regexp_species_protector) \
V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
initial_regexp_string_iterator_prototype_map) \
- V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
- V(REGEXP_PROTOTYPE_INDEX, JSObject, regexp_prototype) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
+ V(SCRIPT_EXECUTION_CALLBACK_INDEX, Object, script_execution_callback) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(SERIALIZED_OBJECTS, FixedArray, serialized_objects) \
V(SET_VALUE_ITERATOR_MAP_INDEX, Map, set_value_iterator_map) \
@@ -302,7 +293,6 @@ enum ContextLookupFlags {
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(WASM_EXPORTED_FUNCTION_MAP_INDEX, Map, wasm_exported_function_map) \
V(WASM_EXCEPTION_CONSTRUCTOR_INDEX, JSFunction, wasm_exception_constructor) \
- V(WASM_FUNCTION_CONSTRUCTOR_INDEX, JSFunction, wasm_function_constructor) \
V(WASM_GLOBAL_CONSTRUCTOR_INDEX, JSFunction, wasm_global_constructor) \
V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
@@ -366,6 +356,7 @@ class ScriptContextTable : public FixedArray {
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
+ RequiresBrandCheckFlag requires_brand_check;
};
inline int used() const;
@@ -453,6 +444,7 @@ class Context : public HeapObject {
// Setter and getter for elements.
V8_INLINE Object get(int index) const;
+ V8_INLINE Object get(Isolate* isolate, int index) const;
V8_INLINE void set(int index, Object value);
// Setter with explicit barrier mode.
V8_INLINE void set(int index, Object value, WriteBarrierMode mode);
@@ -531,10 +523,6 @@ class Context : public HeapObject {
static const int kNoContext = 0;
static const int kInvalidContext = 1;
- void ResetErrorsThrown();
- void IncrementErrorsThrown();
- int GetErrorsThrown();
-
// Direct slot access.
inline void set_scope_info(ScopeInfo scope_info);
@@ -553,7 +541,7 @@ class Context : public HeapObject {
// Find the module context (assuming there is one) and return the associated
// module object.
- Module module();
+ SourceTextModule module();
// Get the context where var declarations will be hoisted to, which
// may be the context itself.
@@ -591,14 +579,6 @@ class Context : public HeapObject {
inline bool HasSameSecurityTokenAs(Context that) const;
- // The native context also stores a list of all optimized code and a
- // list of all deoptimized code, which are needed by the deoptimizer.
- V8_EXPORT_PRIVATE void AddOptimizedCode(Code code);
- void SetOptimizedCodeListHead(Object head);
- Object OptimizedCodeListHead();
- void SetDeoptimizedCodeListHead(Object head);
- Object DeoptimizedCodeListHead();
-
Handle<Object> ErrorMessageForCodeGenerationFromStrings();
static int IntrinsicIndexForName(Handle<String> name);
@@ -703,6 +683,18 @@ class NativeContext : public Context {
class BodyDescriptor;
+ // The native context stores a list of all optimized code and a list of all
+ // deoptimized code, which are needed by the deoptimizer.
+ V8_EXPORT_PRIVATE void AddOptimizedCode(Code code);
+ void SetOptimizedCodeListHead(Object head);
+ Object OptimizedCodeListHead();
+ void SetDeoptimizedCodeListHead(Object head);
+ Object DeoptimizedCodeListHead();
+
+ void ResetErrorsThrown();
+ void IncrementErrorsThrown();
+ int GetErrorsThrown();
+
private:
STATIC_ASSERT(OffsetOfElementAt(EMBEDDER_DATA_INDEX) ==
Internals::kNativeContextEmbedderDataOffset);
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index 1cd64c1bf1..e2805d795a 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -25,13 +25,9 @@ namespace v8 {
namespace internal {
OBJECT_CONSTRUCTORS_IMPL(DescriptorArray, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(EnumCache, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(EnumCache)
CAST_ACCESSOR(DescriptorArray)
-CAST_ACCESSOR(EnumCache)
-
-ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
-ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
ACCESSORS(DescriptorArray, enum_cache, EnumCache, kEnumCacheOffset)
RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_all_descriptors,
@@ -106,17 +102,22 @@ ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
return RawField(OffsetOfDescriptorAt(descriptor));
}
-ObjectSlot DescriptorArray::GetKeySlot(int descriptor) {
- DCHECK_LE(descriptor, number_of_all_descriptors());
- ObjectSlot slot = GetDescriptorSlot(descriptor) + kEntryKeyIndex;
- DCHECK((*slot).IsObject());
- return slot;
+Name DescriptorArray::GetKey(int descriptor_number) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return GetKey(isolate, descriptor_number);
}
-Name DescriptorArray::GetKey(int descriptor_number) const {
- DCHECK(descriptor_number < number_of_descriptors());
- return Name::cast(
- get(ToKeyIndex(descriptor_number))->GetHeapObjectAssumeStrong());
+Name DescriptorArray::GetKey(Isolate* isolate, int descriptor_number) const {
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ return Name::cast(EntryKeyField::Relaxed_Load(isolate, *this, entry_offset));
+}
+
+void DescriptorArray::SetKey(int descriptor_number, Name key) {
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ EntryKeyField::Relaxed_Store(*this, entry_offset, key);
+ WRITE_BARRIER(*this, entry_offset + kEntryKeyOffset, key);
}
int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
@@ -124,38 +125,59 @@ int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
}
Name DescriptorArray::GetSortedKey(int descriptor_number) {
- return GetKey(GetSortedKeyIndex(descriptor_number));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return GetSortedKey(isolate, descriptor_number);
}
-void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
- PropertyDetails details = GetDetails(descriptor_index);
- set(ToDetailsIndex(descriptor_index),
- MaybeObject::FromObject(details.set_pointer(pointer).AsSmi()));
+Name DescriptorArray::GetSortedKey(Isolate* isolate, int descriptor_number) {
+ return GetKey(isolate, GetSortedKeyIndex(descriptor_number));
}
-MaybeObjectSlot DescriptorArray::GetValueSlot(int descriptor) {
- DCHECK_LT(descriptor, number_of_descriptors());
- return MaybeObjectSlot(GetDescriptorSlot(descriptor) + kEntryValueIndex);
+void DescriptorArray::SetSortedKey(int descriptor_number, int pointer) {
+ PropertyDetails details = GetDetails(descriptor_number);
+ SetDetails(descriptor_number, details.set_pointer(pointer));
}
Object DescriptorArray::GetStrongValue(int descriptor_number) {
- DCHECK(descriptor_number < number_of_descriptors());
- return get(ToValueIndex(descriptor_number))->cast<Object>();
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return GetStrongValue(isolate, descriptor_number);
+}
+
+Object DescriptorArray::GetStrongValue(Isolate* isolate,
+ int descriptor_number) {
+ return GetValue(isolate, descriptor_number).cast<Object>();
}
-void DescriptorArray::SetValue(int descriptor_index, Object value) {
- set(ToValueIndex(descriptor_index), MaybeObject::FromObject(value));
+void DescriptorArray::SetValue(int descriptor_number, MaybeObject value) {
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ EntryValueField::Relaxed_Store(*this, entry_offset, value);
+ WEAK_WRITE_BARRIER(*this, entry_offset + kEntryValueOffset, value);
}
MaybeObject DescriptorArray::GetValue(int descriptor_number) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return GetValue(isolate, descriptor_number);
+}
+
+MaybeObject DescriptorArray::GetValue(Isolate* isolate, int descriptor_number) {
DCHECK_LT(descriptor_number, number_of_descriptors());
- return get(ToValueIndex(descriptor_number));
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ return EntryValueField::Relaxed_Load(isolate, *this, entry_offset);
}
PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
- DCHECK(descriptor_number < number_of_descriptors());
- MaybeObject details = get(ToDetailsIndex(descriptor_number));
- return PropertyDetails(details->ToSmi());
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ Smi details = EntryDetailsField::Relaxed_Load(*this, entry_offset);
+ return PropertyDetails(details);
+}
+
+void DescriptorArray::SetDetails(int descriptor_number,
+ PropertyDetails details) {
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ EntryDetailsField::Relaxed_Store(*this, entry_offset, details.AsSmi());
}
int DescriptorArray::GetFieldIndex(int descriptor_number) {
@@ -164,19 +186,22 @@ int DescriptorArray::GetFieldIndex(int descriptor_number) {
}
FieldType DescriptorArray::GetFieldType(int descriptor_number) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return GetFieldType(isolate, descriptor_number);
+}
+
+FieldType DescriptorArray::GetFieldType(Isolate* isolate,
+ int descriptor_number) {
DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
- MaybeObject wrapped_type = GetValue(descriptor_number);
+ MaybeObject wrapped_type = GetValue(isolate, descriptor_number);
return Map::UnwrapFieldType(wrapped_type);
}
void DescriptorArray::Set(int descriptor_number, Name key, MaybeObject value,
PropertyDetails details) {
- // Range check.
- DCHECK(descriptor_number < number_of_descriptors());
- set(ToKeyIndex(descriptor_number), MaybeObject::FromObject(key));
- set(ToValueIndex(descriptor_number), value);
- set(ToDetailsIndex(descriptor_number),
- MaybeObject::FromObject(details.AsSmi()));
+ SetKey(descriptor_number, key);
+ SetDetails(descriptor_number, details);
+ SetValue(descriptor_number, value);
}
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
@@ -211,21 +236,6 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
SetSortedKey(second, first_key);
}
-int DescriptorArray::length() const {
- return number_of_all_descriptors() * kEntrySize;
-}
-
-MaybeObject DescriptorArray::get(int index) const {
- DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_WEAK_FIELD(*this, offset(index));
-}
-
-void DescriptorArray::set(int index, MaybeObject value) {
- DCHECK(index >= 0 && index < this->length());
- RELAXED_WRITE_WEAK_FIELD(*this, offset(index), value);
- WEAK_WRITE_BARRIER(*this, offset(index), value);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 3c1fa98a37..0f17cd22ea 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -22,21 +22,11 @@ class Handle;
class Isolate;
// An EnumCache is a pair used to hold keys and indices caches.
-class EnumCache : public Struct {
+class EnumCache : public TorqueGeneratedEnumCache<EnumCache, Struct> {
public:
- DECL_ACCESSORS(keys, FixedArray)
- DECL_ACCESSORS(indices, FixedArray)
-
- DECL_CAST(EnumCache)
-
- DECL_PRINTER(EnumCache)
DECL_VERIFIER(EnumCache)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- TORQUE_GENERATED_ENUM_CACHE_FIELDS)
-
- OBJECT_CONSTRUCTORS(EnumCache, Struct);
+ TQ_OBJECT_CONSTRUCTORS(EnumCache)
};
// A DescriptorArray is a custom array that holds instance descriptors.
@@ -73,14 +63,18 @@ class DescriptorArray : public HeapObject {
// Accessors for fetching instance descriptor at descriptor number.
inline Name GetKey(int descriptor_number) const;
+ inline Name GetKey(Isolate* isolate, int descriptor_number) const;
inline Object GetStrongValue(int descriptor_number);
- inline void SetValue(int descriptor_number, Object value);
+ inline Object GetStrongValue(Isolate* isolate, int descriptor_number);
inline MaybeObject GetValue(int descriptor_number);
+ inline MaybeObject GetValue(Isolate* isolate, int descriptor_number);
inline PropertyDetails GetDetails(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
inline FieldType GetFieldType(int descriptor_number);
+ inline FieldType GetFieldType(Isolate* isolate, int descriptor_number);
inline Name GetSortedKey(int descriptor_number);
+ inline Name GetSortedKey(Isolate* isolate, int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
inline void SetSortedKey(int pointer, int descriptor_number);
@@ -153,15 +147,13 @@ class DescriptorArray : public HeapObject {
int16_t number_of_marked_descriptors);
static constexpr int SizeFor(int number_of_all_descriptors) {
- return offset(number_of_all_descriptors * kEntrySize);
+ return OffsetOfDescriptorAt(number_of_all_descriptors);
}
static constexpr int OffsetOfDescriptorAt(int descriptor) {
- return offset(descriptor * kEntrySize);
+ return kHeaderSize + descriptor * kEntrySize * kTaggedSize;
}
inline ObjectSlot GetFirstPointerSlot();
inline ObjectSlot GetDescriptorSlot(int descriptor);
- inline ObjectSlot GetKeySlot(int descriptor);
- inline MaybeObjectSlot GetValueSlot(int descriptor);
static_assert(kEndOfStrongFieldsOffset == kStartOfWeakFieldsOffset,
"Weak fields follow strong fields.");
@@ -178,6 +170,10 @@ class DescriptorArray : public HeapObject {
static const int kEntryValueIndex = 2;
static const int kEntrySize = 3;
+ static const int kEntryKeyOffset = kEntryKeyIndex * kTaggedSize;
+ static const int kEntryDetailsOffset = kEntryDetailsIndex * kTaggedSize;
+ static const int kEntryValueOffset = kEntryValueIndex * kTaggedSize;
+
// Print all the descriptors.
void PrintDescriptors(std::ostream& os);
void PrintDescriptorDetails(std::ostream& os, int descriptor,
@@ -207,15 +203,16 @@ class DescriptorArray : public HeapObject {
return (descriptor_number * kEntrySize) + kEntryValueIndex;
}
+ using EntryKeyField = TaggedField<HeapObject, kEntryKeyOffset>;
+ using EntryDetailsField = TaggedField<Smi, kEntryDetailsOffset>;
+ using EntryValueField = TaggedField<MaybeObject, kEntryValueOffset>;
+
private:
DECL_INT16_ACCESSORS(filler16bits)
- // Low-level per-element accessors.
- static constexpr int offset(int index) {
- return kHeaderSize + index * kTaggedSize;
- }
- inline int length() const;
- inline MaybeObject get(int index) const;
- inline void set(int index, MaybeObject value);
+
+ inline void SetKey(int descriptor_number, Name key);
+ inline void SetValue(int descriptor_number, MaybeObject value);
+ inline void SetDetails(int descriptor_number, PropertyDetails details);
// Transfer a complete descriptor from the src descriptor array to this
// descriptor array.
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index a1692978f3..92c1d0940f 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -98,15 +98,27 @@ RootIndex GlobalDictionaryShape::GetMapRootIndex() {
return RootIndex::kGlobalDictionaryMap;
}
-Name NameDictionary::NameAt(int entry) { return Name::cast(KeyAt(entry)); }
+Name NameDictionary::NameAt(int entry) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return NameAt(isolate, entry);
+}
+
+Name NameDictionary::NameAt(Isolate* isolate, int entry) {
+ return Name::cast(KeyAt(isolate, entry));
+}
RootIndex NameDictionaryShape::GetMapRootIndex() {
return RootIndex::kNameDictionaryMap;
}
PropertyCell GlobalDictionary::CellAt(int entry) {
- DCHECK(KeyAt(entry).IsPropertyCell());
- return PropertyCell::cast(KeyAt(entry));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return CellAt(isolate, entry);
+}
+
+PropertyCell GlobalDictionary::CellAt(Isolate* isolate, int entry) {
+ DCHECK(KeyAt(isolate, entry).IsPropertyCell(isolate));
+ return PropertyCell::cast(KeyAt(isolate, entry));
}
bool GlobalDictionaryShape::IsLive(ReadOnlyRoots roots, Object k) {
@@ -118,8 +130,23 @@ bool GlobalDictionaryShape::IsKey(ReadOnlyRoots roots, Object k) {
return IsLive(roots, k) && !PropertyCell::cast(k).value().IsTheHole(roots);
}
-Name GlobalDictionary::NameAt(int entry) { return CellAt(entry).name(); }
-Object GlobalDictionary::ValueAt(int entry) { return CellAt(entry).value(); }
+Name GlobalDictionary::NameAt(int entry) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return NameAt(isolate, entry);
+}
+
+Name GlobalDictionary::NameAt(Isolate* isolate, int entry) {
+ return CellAt(isolate, entry).name(isolate);
+}
+
+Object GlobalDictionary::ValueAt(int entry) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return ValueAt(isolate, entry);
+}
+
+Object GlobalDictionary::ValueAt(Isolate* isolate, int entry) {
+ return CellAt(isolate, entry).value(isolate);
+}
void GlobalDictionary::SetEntry(Isolate* isolate, int entry, Object key,
Object value, PropertyDetails details) {
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index ca709f34d8..fe6001f58c 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -32,7 +32,11 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
using Key = typename Shape::Key;
// Returns the value at entry.
Object ValueAt(int entry) {
- return this->get(DerivedHashTable::EntryToIndex(entry) + 1);
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return ValueAt(isolate, entry);
+ }
+ Object ValueAt(Isolate* isolate, int entry) {
+ return this->get(isolate, DerivedHashTable::EntryToIndex(entry) + 1);
}
// Set the value for entry.
@@ -208,6 +212,8 @@ class V8_EXPORT_PRIVATE NameDictionary
static const int kInitialCapacity = 2;
inline Name NameAt(int entry);
+ inline Name NameAt(Isolate* isolate, int entry);
+
inline void set_hash(int hash);
inline int hash() const;
@@ -246,10 +252,13 @@ class V8_EXPORT_PRIVATE GlobalDictionary
DECL_CAST(GlobalDictionary)
inline Object ValueAt(int entry);
+ inline Object ValueAt(Isolate* isolate, int entry);
inline PropertyCell CellAt(int entry);
+ inline PropertyCell CellAt(Isolate* isolate, int entry);
inline void SetEntry(Isolate* isolate, int entry, Object key, Object value,
PropertyDetails details);
inline Name NameAt(int entry);
+ inline Name NameAt(Isolate* isolate, int entry);
inline void ValueAtPut(int entry, Object value);
OBJECT_CONSTRUCTORS(
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index e1232a0d5b..4bdfba052d 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -4,10 +4,10 @@
#include "src/objects/elements.h"
+#include "src/common/message-template.h"
#include "src/execution/arguments.h"
#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For MaxNumberToStringCacheSize.
#include "src/heap/heap-write-barrier-inl.h"
@@ -141,6 +141,12 @@ WriteBarrierMode GetWriteBarrierMode(ElementsKind kind) {
return UPDATE_WRITE_BARRIER;
}
+// If kCopyToEndAndInitializeToHole is specified as the copy_size to
+// CopyElements, it copies all elements from the source after source_start to
+// the destination array, padding any remaining uninitialized elements in the
+// destination array with the hole.
+constexpr int kCopyToEndAndInitializeToHole = -1;
+
void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
ElementsKind from_kind, uint32_t from_start,
FixedArrayBase to_base, ElementsKind to_kind,
@@ -150,17 +156,14 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
Min(from_base.length() - from_start, to_base.length() - to_start);
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- int start = to_start + copy_size;
- int length = to_base.length() - start;
- if (length > 0) {
- MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
- roots.the_hole_value(), length);
- }
+ int start = to_start + copy_size;
+ int length = to_base.length() - start;
+ if (length > 0) {
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
+ roots.the_hole_value(), length);
}
}
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
@@ -179,24 +182,21 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
write_barrier_mode);
}
-static void CopyDictionaryToObjectElements(
- Isolate* isolate, FixedArrayBase from_base, uint32_t from_start,
- FixedArrayBase to_base, ElementsKind to_kind, uint32_t to_start,
- int raw_copy_size) {
+void CopyDictionaryToObjectElements(Isolate* isolate, FixedArrayBase from_base,
+ uint32_t from_start, FixedArrayBase to_base,
+ ElementsKind to_kind, uint32_t to_start,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
NumberDictionary from = NumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size = from.max_number_key() + 1 - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- int start = to_start + copy_size;
- int length = to_base.length() - start;
- if (length > 0) {
- MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
- ReadOnlyRoots(isolate).the_hole_value(), length);
- }
+ int start = to_start + copy_size;
+ int length = to_base.length() - start;
+ if (length > 0) {
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
+ ReadOnlyRoots(isolate).the_hole_value(), length);
}
}
DCHECK(to_base != from_base);
@@ -223,28 +223,23 @@ static void CopyDictionaryToObjectElements(
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameters in the function that allocates.
// See ElementsAccessorBase::CopyElements() for details.
-static void CopyDoubleToObjectElements(Isolate* isolate,
- FixedArrayBase from_base,
- uint32_t from_start,
- FixedArrayBase to_base,
- uint32_t to_start, int raw_copy_size) {
+void CopyDoubleToObjectElements(Isolate* isolate, FixedArrayBase from_base,
+ uint32_t from_start, FixedArrayBase to_base,
+ uint32_t to_start, int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
DisallowHeapAllocation no_allocation;
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
Min(from_base.length() - from_start, to_base.length() - to_start);
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- // Also initialize the area that will be copied over since HeapNumber
- // allocation below can cause an incremental marking step, requiring all
- // existing heap objects to be propertly initialized.
- int start = to_start;
- int length = to_base.length() - start;
- if (length > 0) {
- MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
- ReadOnlyRoots(isolate).the_hole_value(), length);
- }
+ // Also initialize the area that will be copied over since HeapNumber
+ // allocation below can cause an incremental marking step, requiring all
+ // existing heap objects to be properly initialized.
+ int start = to_start;
+ int length = to_base.length() - start;
+ if (length > 0) {
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
+ ReadOnlyRoots(isolate).the_hole_value(), length);
}
}
@@ -272,21 +267,17 @@ static void CopyDoubleToObjectElements(Isolate* isolate,
}
}
-static void CopyDoubleToDoubleElements(FixedArrayBase from_base,
- uint32_t from_start,
- FixedArrayBase to_base,
- uint32_t to_start, int raw_copy_size) {
+void CopyDoubleToDoubleElements(FixedArrayBase from_base, uint32_t from_start,
+ FixedArrayBase to_base, uint32_t to_start,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
Min(from_base.length() - from_start, to_base.length() - to_start);
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base.length(); ++i) {
- FixedDoubleArray::cast(to_base).set_the_hole(i);
- }
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
@@ -312,19 +303,16 @@ static void CopyDoubleToDoubleElements(FixedArrayBase from_base,
#endif
}
-static void CopySmiToDoubleElements(FixedArrayBase from_base,
- uint32_t from_start, FixedArrayBase to_base,
- uint32_t to_start, int raw_copy_size) {
+void CopySmiToDoubleElements(FixedArrayBase from_base, uint32_t from_start,
+ FixedArrayBase to_base, uint32_t to_start,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size = from_base.length() - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base.length(); ++i) {
- FixedDoubleArray::cast(to_base).set_the_hole(i);
- }
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
@@ -344,25 +332,19 @@ static void CopySmiToDoubleElements(FixedArrayBase from_base,
}
}
-static void CopyPackedSmiToDoubleElements(FixedArrayBase from_base,
- uint32_t from_start,
- FixedArrayBase to_base,
- uint32_t to_start, int packed_size,
- int raw_copy_size) {
+void CopyPackedSmiToDoubleElements(FixedArrayBase from_base,
+ uint32_t from_start, FixedArrayBase to_base,
+ uint32_t to_start, int packed_size,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
uint32_t to_end;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size = packed_size - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- to_end = to_base.length();
- for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
- FixedDoubleArray::cast(to_base).set_the_hole(i);
- }
- } else {
- to_end = to_start + static_cast<uint32_t>(copy_size);
+ to_end = to_base.length();
+ for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
@@ -382,20 +364,16 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase from_base,
}
}
-static void CopyObjectToDoubleElements(FixedArrayBase from_base,
- uint32_t from_start,
- FixedArrayBase to_base,
- uint32_t to_start, int raw_copy_size) {
+void CopyObjectToDoubleElements(FixedArrayBase from_base, uint32_t from_start,
+ FixedArrayBase to_base, uint32_t to_start,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size = from_base.length() - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base.length(); ++i) {
- FixedDoubleArray::cast(to_base).set_the_hole(i);
- }
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
@@ -415,20 +393,17 @@ static void CopyObjectToDoubleElements(FixedArrayBase from_base,
}
}
-static void CopyDictionaryToDoubleElements(
- Isolate* isolate, FixedArrayBase from_base, uint32_t from_start,
- FixedArrayBase to_base, uint32_t to_start, int raw_copy_size) {
+void CopyDictionaryToDoubleElements(Isolate* isolate, FixedArrayBase from_base,
+ uint32_t from_start, FixedArrayBase to_base,
+ uint32_t to_start, int raw_copy_size) {
DisallowHeapAllocation no_allocation;
NumberDictionary from = NumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (copy_size < 0) {
- DCHECK(copy_size == ElementsAccessor::kCopyToEnd ||
- copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, copy_size);
copy_size = from.max_number_key() + 1 - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base.length(); ++i) {
- FixedDoubleArray::cast(to_base).set_the_hole(i);
- }
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
if (copy_size == 0) return;
@@ -447,17 +422,16 @@ static void CopyDictionaryToDoubleElements(
}
}
-static void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
- uint32_t sort_size) {
+void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
+ uint32_t sort_size) {
// Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
AtomicSlot start(indices->GetFirstElementAddress());
AtomicSlot end(start + sort_size);
std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) {
#ifdef V8_COMPRESS_POINTERS
- DEFINE_ROOT_VALUE(isolate);
- Object a(DecompressTaggedAny(ROOT_VALUE, elementA));
- Object b(DecompressTaggedAny(ROOT_VALUE, elementB));
+ Object a(DecompressTaggedAny(isolate, elementA));
+ Object b(DecompressTaggedAny(isolate, elementB));
#else
Object a(elementA);
Object b(elementB);
@@ -474,10 +448,9 @@ static void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
ObjectSlot(end));
}
-static Maybe<bool> IncludesValueSlowPath(Isolate* isolate,
- Handle<JSObject> receiver,
- Handle<Object> value,
- uint32_t start_from, uint32_t length) {
+Maybe<bool> IncludesValueSlowPath(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start_from,
+ uint32_t length) {
bool search_for_hole = value->IsUndefined(isolate);
for (uint32_t k = start_from; k < length; ++k) {
LookupIterator it(isolate, receiver, k);
@@ -495,11 +468,9 @@ static Maybe<bool> IncludesValueSlowPath(Isolate* isolate,
return Just(false);
}
-static Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate,
- Handle<JSObject> receiver,
- Handle<Object> value,
- uint32_t start_from,
- uint32_t length) {
+Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start_from,
+ uint32_t length) {
for (uint32_t k = start_from; k < length; ++k) {
LookupIterator it(isolate, receiver, k);
if (!it.IsFound()) {
@@ -595,23 +566,6 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return true;
}
- static void TryTransitionResultArrayToPacked(Handle<JSArray> array) {
- if (!IsHoleyElementsKind(kind())) return;
- Handle<FixedArrayBase> backing_store(array->elements(),
- array->GetIsolate());
- int length = Smi::ToInt(array->length());
- if (!Subclass::IsPackedImpl(*array, *backing_store, 0, length)) return;
-
- ElementsKind packed_kind = GetPackedElementsKind(kind());
- Handle<Map> new_map =
- JSObject::GetElementsTransitionMap(array, packed_kind);
- JSObject::MigrateToMap(array, new_map);
- if (FLAG_trace_elements_transitions) {
- JSObject::PrintElementsTransition(stdout, array, kind(), backing_store,
- packed_kind, backing_store);
- }
- }
-
bool HasElement(JSObject holder, uint32_t index, FixedArrayBase backing_store,
PropertyFilter filter) final {
return Subclass::HasElementImpl(holder.GetIsolate(), holder, index,
@@ -804,22 +758,14 @@ class ElementsAccessorBase : public InternalElementsAccessor {
static Handle<FixedArrayBase> ConvertElementsWithCapacity(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, uint32_t capacity) {
- return ConvertElementsWithCapacity(
- object, old_elements, from_kind, capacity, 0, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole);
- }
-
- static Handle<FixedArrayBase> ConvertElementsWithCapacity(
- Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
- ElementsKind from_kind, uint32_t capacity, int copy_size) {
return ConvertElementsWithCapacity(object, old_elements, from_kind,
- capacity, 0, 0, copy_size);
+ capacity, 0, 0);
}
static Handle<FixedArrayBase> ConvertElementsWithCapacity(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, uint32_t capacity, uint32_t src_index,
- uint32_t dst_index, int copy_size) {
+ uint32_t dst_index) {
Isolate* isolate = object->GetIsolate();
Handle<FixedArrayBase> new_elements;
if (IsDoubleElementsKind(kind())) {
@@ -834,14 +780,16 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
Subclass::CopyElementsImpl(isolate, *old_elements, src_index, *new_elements,
- from_kind, dst_index, packed_size, copy_size);
+ from_kind, dst_index, packed_size,
+ kCopyToEndAndInitializeToHole);
return new_elements;
}
static void TransitionElementsKindImpl(Handle<JSObject> object,
Handle<Map> to_map) {
- Handle<Map> from_map = handle(object->map(), object->GetIsolate());
+ Isolate* isolate = object->GetIsolate();
+ Handle<Map> from_map = handle(object->map(), isolate);
ElementsKind from_kind = from_map->elements_kind();
ElementsKind to_kind = to_map->elements_kind();
if (IsHoleyElementsKind(from_kind)) {
@@ -853,14 +801,12 @@ class ElementsAccessorBase : public InternalElementsAccessor {
DCHECK(IsFastElementsKind(to_kind));
DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
- Handle<FixedArrayBase> from_elements(object->elements(),
- object->GetIsolate());
- if (object->elements() ==
- object->GetReadOnlyRoots().empty_fixed_array() ||
+ Handle<FixedArrayBase> from_elements(object->elements(), isolate);
+ if (object->elements() == ReadOnlyRoots(isolate).empty_fixed_array() ||
IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
// No change is needed to the elements() buffer, the transition
// only requires a map change.
- JSObject::MigrateToMap(object, to_map);
+ JSObject::MigrateToMap(isolate, object, to_map);
} else {
DCHECK(
(IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
@@ -871,9 +817,9 @@ class ElementsAccessorBase : public InternalElementsAccessor {
JSObject::SetMapAndElements(object, to_map, elements);
}
if (FLAG_trace_elements_transitions) {
- JSObject::PrintElementsTransition(
- stdout, object, from_kind, from_elements, to_kind,
- handle(object->elements(), object->GetIsolate()));
+ JSObject::PrintElementsTransition(stdout, object, from_kind,
+ from_elements, to_kind,
+ handle(object->elements(), isolate));
}
}
}
@@ -2394,7 +2340,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// Copy over all objects to a new backing_store.
backing_store = Subclass::ConvertElementsWithCapacity(
receiver, backing_store, KindTraits::Kind, capacity, 0,
- copy_dst_index, ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_dst_index);
receiver->set_elements(*backing_store);
} else if (add_position == AT_START) {
// If the backing store has enough capacity and we add elements to the
@@ -2639,7 +2585,7 @@ class FastSealedObjectElementsAccessor
"SlowCopyForSetLengthImpl");
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- JSObject::MigrateToMap(array, new_map);
+ JSObject::MigrateToMap(isolate, array, new_map);
if (!new_element_dictionary.is_null()) {
array->set_elements(*new_element_dictionary);
@@ -2955,7 +2901,7 @@ class TypedElementsAccessor
// fields (external pointers, doubles and BigInt data) are only
// kTaggedSize aligned so we have to use unaligned pointer friendly way of
// accessing them in order to avoid undefined behavior in C++ code.
- WriteUnalignedValue<ElementType>(
+ base::WriteUnalignedValue<ElementType>(
reinterpret_cast<Address>(data_ptr + entry), value);
} else {
data_ptr[entry] = value;
@@ -2995,7 +2941,7 @@ class TypedElementsAccessor
// fields (external pointers, doubles and BigInt data) are only
// kTaggedSize aligned so we have to use unaligned pointer friendly way of
// accessing them in order to avoid undefined behavior in C++ code.
- result = ReadUnalignedValue<ElementType>(
+ result = base::ReadUnalignedValue<ElementType>(
reinterpret_cast<Address>(data_ptr + entry));
} else {
result = data_ptr[entry];
@@ -3664,10 +3610,7 @@ Handle<Object> TypedElementsAccessor<UINT32_ELEMENTS, uint32_t>::ToHandle(
// static
template <>
float TypedElementsAccessor<FLOAT32_ELEMENTS, float>::FromScalar(double value) {
- using limits = std::numeric_limits<float>;
- if (value > limits::max()) return limits::infinity();
- if (value < limits::lowest()) return -limits::infinity();
- return static_cast<float>(value);
+ return DoubleToFloat32(value);
}
// static
@@ -4377,7 +4320,7 @@ class FastSloppyArgumentsElementsAccessor
ConvertElementsWithCapacity(object, old_arguments, from_kind, capacity);
Handle<Map> new_map = JSObject::GetElementsTransitionMap(
object, FAST_SLOPPY_ARGUMENTS_ELEMENTS);
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
elements->set_arguments(FixedArray::cast(*arguments));
JSObject::ValidateElements(*object);
}
@@ -4549,8 +4492,8 @@ class StringWrapperElementsAccessor
private:
static String GetString(JSObject holder) {
- DCHECK(holder.IsJSValue());
- JSValue js_value = JSValue::cast(holder);
+ DCHECK(holder.IsJSPrimitiveWrapper());
+ JSPrimitiveWrapper js_value = JSPrimitiveWrapper::cast(holder);
DCHECK(js_value.value().IsString());
return String::cast(js_value.value());
}
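
The hunks above switch raw field accesses over to base::ReadUnalignedValue / base::WriteUnalignedValue because, as the surrounding comments note, these fields are only kTaggedSize aligned under pointer compression. Below is a minimal standalone sketch of the memcpy-based pattern such helpers conventionally use; the names and layout are illustrative, not V8's actual implementation.

// Standalone sketch (not V8 code): memcpy-based unaligned access, the
// conventional way to read or write a value at an address that may not meet
// the type's alignment requirement without invoking undefined behavior.
#include <cassert>
#include <cstring>

template <typename T>
T ReadUnaligned(const void* p) {
  T result;
  std::memcpy(&result, p, sizeof(T));  // compilers lower this to a plain load
  return result;
}

template <typename T>
void WriteUnaligned(void* p, T value) {
  std::memcpy(p, &value, sizeof(T));
}

int main() {
  // A buffer where a double sits at a 4-byte (not 8-byte) offset.
  alignas(8) unsigned char buf[16] = {};
  WriteUnaligned<double>(buf + 4, 1.5);
  assert(ReadUnaligned<double>(buf + 4) == 1.5);
  return 0;
}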
diff --git a/deps/v8/src/objects/elements.h b/deps/v8/src/objects/elements.h
index 844cd2ed94..a72a6b068e 100644
--- a/deps/v8/src/objects/elements.h
+++ b/deps/v8/src/objects/elements.h
@@ -66,15 +66,6 @@ class ElementsAccessor {
// element that is non-deletable.
virtual void SetLength(Handle<JSArray> holder, uint32_t new_length) = 0;
- // If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
- // of elements from source after source_start to the destination array.
- static const int kCopyToEnd = -1;
- // If kCopyToEndAndInitializeToHole is specified as the copy_size to
- // CopyElements, it copies all of elements from source after source_start to
- // destination array, padding any remaining uninitialized elements in the
- // destination array with the hole.
- static const int kCopyToEndAndInitializeToHole = -2;
-
// Copy all indices that have elements from |object| into the given
// KeyAccumulator. For Dictionary-based element-kinds we filter out elements
// whose PropertyAttribute match |filter|.
@@ -210,7 +201,7 @@ class ElementsAccessor {
uint32_t destination_start, int copy_size) = 0;
private:
- static ElementsAccessor** elements_accessors_;
+ V8_EXPORT_PRIVATE static ElementsAccessor** elements_accessors_;
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
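
The removed kCopyToEnd / kCopyToEndAndInitializeToHole sentinels documented a copy mode that the elements.cc hunks now bake directly into ConvertElementsWithCapacity. A standalone sketch of that "copy the source tail, pad the rest with the hole" behavior, using an ordinary vector and a made-up hole marker rather than V8's types:

// Standalone sketch (not V8 code): the copy semantics the removed
// kCopyToEndAndInitializeToHole sentinel used to request via copy_size.
#include <cassert>
#include <vector>

constexpr int kTheHole = -1;  // stand-in for V8's hole marker

std::vector<int> CopyToEndAndHoleInit(const std::vector<int>& src,
                                      size_t src_start, size_t new_capacity) {
  std::vector<int> dst(new_capacity, kTheHole);  // pre-fill with holes
  for (size_t i = src_start;
       i < src.size() && i - src_start < new_capacity; ++i) {
    dst[i - src_start] = src[i];  // copy everything after src_start
  }
  return dst;
}

int main() {
  std::vector<int> dst = CopyToEndAndHoleInit({10, 20, 30}, 1, 4);
  assert(dst[0] == 20 && dst[1] == 30);
  assert(dst[2] == kTheHole && dst[3] == kTheHole);
  return 0;
}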
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index 6830a4d22e..78189ba381 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/embedder-data-slot.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/embedder-data-array.h"
#include "src/objects/js-objects-inl.h"
@@ -77,7 +77,7 @@ bool EmbedderDataSlot::ToAlignedPointer(void** out_pointer) const {
// fields (external pointers, doubles and BigInt data) are only kTaggedSize
// aligned so we have to use unaligned pointer friendly way of accessing them
// in order to avoid undefined behavior in C++ code.
- Address raw_value = ReadUnalignedValue<Address>(address());
+ Address raw_value = base::ReadUnalignedValue<Address>(address());
#else
Address raw_value = *location();
#endif
@@ -103,7 +103,7 @@ EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
// fields (external pointers, doubles and BigInt data) are only kTaggedSize
// aligned so we have to use unaligned pointer friendly way of accessing them
// in order to avoid undefined behavior in C++ code.
- return ReadUnalignedValue<Address>(address());
+ return base::ReadUnalignedValue<Address>(address());
#else
return *location();
#endif
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index 6b1fdcc1e5..9cdc03b5c2 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -155,13 +155,22 @@ FeedbackSlot FeedbackVector::ToSlot(int index) {
}
MaybeObject FeedbackVector::Get(FeedbackSlot slot) const {
- return get(GetIndex(slot));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return Get(isolate, slot);
+}
+
+MaybeObject FeedbackVector::Get(Isolate* isolate, FeedbackSlot slot) const {
+ return get(isolate, GetIndex(slot));
}
MaybeObject FeedbackVector::get(int index) const {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kFeedbackSlotsOffset + index * kTaggedSize;
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return get(isolate, index);
+}
+
+MaybeObject FeedbackVector::get(Isolate* isolate, int index) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ int offset = OffsetOfElementAt(index);
return RELAXED_READ_WEAK_FIELD(*this, offset);
}
@@ -180,7 +189,7 @@ void FeedbackVector::Set(FeedbackSlot slot, MaybeObject value,
void FeedbackVector::set(int index, MaybeObject value, WriteBarrierMode mode) {
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
- int offset = kFeedbackSlotsOffset + index * kTaggedSize;
+ int offset = OffsetOfElementAt(index);
RELAXED_WRITE_WEAK_FIELD(*this, offset, value);
CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);
}
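
The rewritten DCHECKs above collapse the old "index >= 0" and "index < length" pair into a single unsigned comparison. A standalone sketch of why that works: a negative index converts to a huge unsigned value and therefore fails the same "<" test.

// Standalone sketch (not V8 code): one unsigned comparison covers both bounds.
#include <cassert>

bool InBounds(int index, int length) {
  return static_cast<unsigned>(index) < static_cast<unsigned>(length);
}

int main() {
  assert(InBounds(0, 3));
  assert(InBounds(2, 3));
  assert(!InBounds(3, 3));   // too large
  assert(!InBounds(-1, 3));  // negative wraps to a huge unsigned value
  return 0;
}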
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index 0393a55f69..4f4826eab3 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -374,6 +374,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
}
bool FeedbackVector::ClearSlots(Isolate* isolate) {
+ if (!shared_function_info().HasFeedbackMetadata()) return false;
MaybeObject uninitialized_sentinel = MaybeObject::FromObject(
FeedbackVector::RawUninitializedSentinel(isolate));
@@ -943,6 +944,7 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
+ DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
MaybeObject feedback = GetFeedback();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
@@ -986,19 +988,22 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
return 0;
}
-MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
+int FeedbackNexus::ExtractMapsAndHandlers(MapHandles* maps,
+ MaybeObjectHandles* handlers) const {
DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
- IsKeyedHasICKind(kind()));
+ IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
- MaybeObject feedback = GetFeedback();
+ DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
+ MaybeObject feedback = GetFeedback();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject heap_object;
if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object.IsWeakFixedArray()) ||
is_named_feedback) {
+ int found = 0;
WeakFixedArray array;
if (is_named_feedback) {
array =
@@ -1011,36 +1016,39 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
for (int i = 0; i < array.length(); i += increment) {
DCHECK(array.Get(i)->IsWeakOrCleared());
if (array.Get(i)->GetHeapObjectIfWeak(&heap_object)) {
- Map array_map = Map::cast(heap_object);
- if (array_map == *map && !array.Get(i + increment - 1)->IsCleared()) {
- MaybeObject handler = array.Get(i + increment - 1);
+ MaybeObject handler = array.Get(i + 1);
+ if (!handler->IsCleared()) {
DCHECK(IC::IsHandler(handler));
- return handle(handler, isolate);
+ Map map = Map::cast(heap_object);
+ maps->push_back(handle(map, isolate));
+ handlers->push_back(handle(handler, isolate));
+ found++;
}
}
}
+ return found;
} else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
- Map cell_map = Map::cast(heap_object);
- if (cell_map == *map && !GetFeedbackExtra()->IsCleared()) {
- MaybeObject handler = GetFeedbackExtra();
+ MaybeObject handler = GetFeedbackExtra();
+ if (!handler->IsCleared()) {
DCHECK(IC::IsHandler(handler));
- return handle(handler, isolate);
+ Map map = Map::cast(heap_object);
+ maps->push_back(handle(map, isolate));
+ handlers->push_back(handle(handler, isolate));
+ return 1;
}
}
- return MaybeObjectHandle();
+ return 0;
}
-bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
- int length) const {
+MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
- IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
+ IsKeyedHasICKind(kind()));
MaybeObject feedback = GetFeedback();
Isolate* isolate = GetIsolate();
- int count = 0;
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject heap_object;
if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
@@ -1056,25 +1064,26 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
const int increment = 2;
HeapObject heap_object;
for (int i = 0; i < array.length(); i += increment) {
- // Be sure to skip handlers whose maps have been cleared.
DCHECK(array.Get(i)->IsWeakOrCleared());
- if (array.Get(i)->GetHeapObjectIfWeak(&heap_object) &&
- !array.Get(i + increment - 1)->IsCleared()) {
- MaybeObject handler = array.Get(i + increment - 1);
- DCHECK(IC::IsHandler(handler));
- code_list->push_back(handle(handler, isolate));
- count++;
+ if (array.Get(i)->GetHeapObjectIfWeak(&heap_object)) {
+ Map array_map = Map::cast(heap_object);
+ if (array_map == *map && !array.Get(i + increment - 1)->IsCleared()) {
+ MaybeObject handler = array.Get(i + increment - 1);
+ DCHECK(IC::IsHandler(handler));
+ return handle(handler, isolate);
+ }
}
}
} else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
- MaybeObject extra = GetFeedbackExtra();
- if (!extra->IsCleared()) {
- DCHECK(IC::IsHandler(extra));
- code_list->push_back(handle(extra, isolate));
- count++;
+ Map cell_map = Map::cast(heap_object);
+ if (cell_map == *map && !GetFeedbackExtra()->IsCleared()) {
+ MaybeObject handler = GetFeedbackExtra();
+ DCHECK(IC::IsHandler(handler));
+ return handle(handler, isolate);
}
}
- return count == length;
+
+ return MaybeObjectHandle();
}
Name FeedbackNexus::GetName() const {
@@ -1095,8 +1104,7 @@ KeyedAccessLoadMode FeedbackNexus::GetKeyedAccessLoadMode() const {
if (GetKeyType() == PROPERTY) return STANDARD_LOAD;
- ExtractMaps(&maps);
- FindHandlers(&handlers, static_cast<int>(maps.size()));
+ ExtractMapsAndHandlers(&maps, &handlers);
for (MaybeObjectHandle const& handler : handlers) {
KeyedAccessLoadMode mode = LoadHandler::GetKeyedAccessLoadMode(*handler);
if (mode != STANDARD_LOAD) return mode;
@@ -1179,8 +1187,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
if (GetKeyType() == PROPERTY) return mode;
- ExtractMaps(&maps);
- FindHandlers(&handlers, static_cast<int>(maps.size()));
+ ExtractMapsAndHandlers(&maps, &handlers);
for (const MaybeObjectHandle& maybe_code_handler : handlers) {
// The first handler that isn't the slow handler will have the bits we need.
Handle<Code> handler;
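
The new ExtractMapsAndHandlers above makes a single pass over the flat feedback array, treating consecutive slots as (weak map, handler) pairs and keeping only entries whose slots are still live. A standalone sketch of roughly that shape, with std::optional standing in for a weak reference that may have been cleared; none of the names are V8's.

// Standalone sketch (not V8 code): collect only live (map, handler) pairs
// from a flat array laid out in groups of two.
#include <cassert>
#include <optional>
#include <utility>
#include <vector>

int ExtractLivePairs(const std::vector<std::optional<int>>& flat,
                     std::vector<std::pair<int, int>>* out) {
  int found = 0;
  for (size_t i = 0; i + 1 < flat.size(); i += 2) {
    const auto& map = flat[i];
    const auto& handler = flat[i + 1];
    if (map.has_value() && handler.has_value()) {  // skip cleared entries
      out->emplace_back(*map, *handler);
      ++found;
    }
  }
  return found;
}

int main() {
  std::vector<std::pair<int, int>> pairs;
  // The second entry's map slot has been cleared, so it is skipped.
  assert(ExtractLivePairs({{1}, {100}, std::nullopt, {200}, {3}, {300}},
                          &pairs) == 2);
  assert(pairs.size() == 2);
  return 0;
}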
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 89e0b9e6aa..af03bb4130 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -233,7 +233,9 @@ class FeedbackVector : public HeapObject {
// Conversion from an integer index to the underlying array to a slot.
static inline FeedbackSlot ToSlot(int index);
inline MaybeObject Get(FeedbackSlot slot) const;
+ inline MaybeObject Get(Isolate* isolate, FeedbackSlot slot) const;
inline MaybeObject get(int index) const;
+ inline MaybeObject get(Isolate* isolate, int index) const;
inline void Set(FeedbackSlot slot, MaybeObject value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void set(int index, MaybeObject value,
@@ -322,11 +324,13 @@ class FeedbackVector : public HeapObject {
class BodyDescriptor;
- // Garbage collection support.
- static constexpr int SizeFor(int length) {
- return kFeedbackSlotsOffset + length * kTaggedSize;
+ static constexpr int OffsetOfElementAt(int index) {
+ return kFeedbackSlotsOffset + index * kTaggedSize;
}
+ // Garbage collection support.
+ static constexpr int SizeFor(int length) { return OffsetOfElementAt(length); }
+
private:
static void AddToVectorsForProfilingTools(Isolate* isolate,
Handle<FeedbackVector> vector);
@@ -561,6 +565,7 @@ class FeedbackMetadata : public HeapObject {
// Verify that an empty hash field looks like a tagged object, but can't
// possibly be confused with a pointer.
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag);
STATIC_ASSERT(Name::kEmptyHashField == 0x3);
// Verify that a set hash field will not look like a tagged object.
@@ -646,8 +651,9 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
Map GetFirstMap() const;
int ExtractMaps(MapHandles* maps) const;
+ int ExtractMapsAndHandlers(MapHandles* maps,
+ MaybeObjectHandles* handlers) const;
MaybeObjectHandle FindHandlerForMap(Handle<Map> map) const;
- bool FindHandlers(MaybeObjectHandles* code_list, int length = -1) const;
bool IsCleared() const {
InlineCacheState state = ic_state();
diff --git a/deps/v8/src/objects/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h
index be60fb54a2..997cd68c32 100644
--- a/deps/v8/src/objects/field-index-inl.h
+++ b/deps/v8/src/objects/field-index-inl.h
@@ -19,7 +19,7 @@ FieldIndex FieldIndex::ForInObjectOffset(int offset, Encoding encoding) {
return FieldIndex(true, offset, encoding, 0, 0);
}
-FieldIndex FieldIndex::ForPropertyIndex(const Map map, int property_index,
+FieldIndex FieldIndex::ForPropertyIndex(Map map, int property_index,
Representation representation) {
DCHECK(map.instance_type() >= FIRST_NONSTRING_TYPE);
int inobject_properties = map.GetInObjectProperties();
@@ -60,9 +60,15 @@ int FieldIndex::GetLoadByFieldIndex() const {
return is_double() ? (result | 1) : result;
}
-FieldIndex FieldIndex::ForDescriptor(const Map map, int descriptor_index) {
+FieldIndex FieldIndex::ForDescriptor(Map map, int descriptor_index) {
+ Isolate* isolate = GetIsolateForPtrCompr(map);
+ return ForDescriptor(isolate, map, descriptor_index);
+}
+
+FieldIndex FieldIndex::ForDescriptor(Isolate* isolate, Map map,
+ int descriptor_index) {
PropertyDetails details =
- map.instance_descriptors().GetDetails(descriptor_index);
+ map.instance_descriptors(isolate).GetDetails(descriptor_index);
int field_index = details.field_index();
return ForPropertyIndex(map, field_index, details.representation());
}
diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h
index f352ef6800..a6657634c8 100644
--- a/deps/v8/src/objects/field-index.h
+++ b/deps/v8/src/objects/field-index.h
@@ -24,10 +24,12 @@ class FieldIndex final {
FieldIndex() : bit_field_(0) {}
static inline FieldIndex ForPropertyIndex(
- const Map map, int index,
+ Map map, int index,
Representation representation = Representation::Tagged());
static inline FieldIndex ForInObjectOffset(int offset, Encoding encoding);
- static inline FieldIndex ForDescriptor(const Map map, int descriptor_index);
+ static inline FieldIndex ForDescriptor(Map map, int descriptor_index);
+ static inline FieldIndex ForDescriptor(Isolate* isolate, Map map,
+ int descriptor_index);
inline int GetLoadByFieldIndex() const;
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 6d2b42edbf..79c29a6eeb 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -90,51 +90,57 @@ bool FixedArray::ContainsOnlySmisOrHoles() {
}
Object FixedArray::get(int index) const {
- DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_FIELD(*this, kHeaderSize + index * kTaggedSize);
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return get(isolate, index);
+}
+
+Object FixedArray::get(Isolate* isolate, int index) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ return TaggedField<Object>::Relaxed_Load(isolate, *this,
+ OffsetOfElementAt(index));
}
Handle<Object> FixedArray::get(FixedArray array, int index, Isolate* isolate) {
- return handle(array.get(index), isolate);
+ return handle(array.get(isolate, index), isolate);
}
bool FixedArray::is_the_hole(Isolate* isolate, int index) {
- return get(index).IsTheHole(isolate);
+ return get(isolate, index).IsTheHole(isolate);
}
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_LT(index, this->length());
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
DCHECK(Object(value).IsSmi());
- int offset = kHeaderSize + index * kTaggedSize;
+ int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
DCHECK(IsFixedArray());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kTaggedSize;
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
WRITE_BARRIER(*this, offset, value);
}
void FixedArray::set(int index, Object value, WriteBarrierMode mode) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kTaggedSize;
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
}
+// static
void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) {
DCHECK_NE(array.map(), array.GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, array.length());
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(array.length()));
DCHECK(!ObjectInYoungGeneration(value));
- RELAXED_WRITE_FIELD(array, kHeaderSize + index * kTaggedSize, value);
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(array, offset, value);
}
void FixedArray::set_undefined(int index) {
@@ -323,7 +329,7 @@ uint64_t FixedDoubleArray::get_representation(int index) {
DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kDoubleSize;
// Bug(v8:8875): Doubles may be unaligned.
- return ReadUnalignedValue<uint64_t>(field_address(offset));
+ return base::ReadUnalignedValue<uint64_t>(field_address(offset));
}
Handle<Object> FixedDoubleArray::get(FixedDoubleArray array, int index,
@@ -355,7 +361,7 @@ void FixedDoubleArray::set_the_hole(int index) {
DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() &&
map() != GetReadOnlyRoots().fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
- WriteUnalignedValue<uint64_t>(field_address(offset), kHoleNanInt64);
+ base::WriteUnalignedValue<uint64_t>(field_address(offset), kHoleNanInt64);
}
bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
@@ -382,8 +388,14 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
}
MaybeObject WeakFixedArray::Get(int index) const {
- DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_WEAK_FIELD(*this, OffsetOfElementAt(index));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return Get(isolate, index);
+}
+
+MaybeObject WeakFixedArray::Get(Isolate* isolate, int index) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ return TaggedField<MaybeObject>::Relaxed_Load(isolate, *this,
+ OffsetOfElementAt(index));
}
void WeakFixedArray::Set(int index, MaybeObject value) {
@@ -424,8 +436,14 @@ void WeakFixedArray::CopyElements(Isolate* isolate, int dst_index,
}
MaybeObject WeakArrayList::Get(int index) const {
- DCHECK(index >= 0 && index < this->capacity());
- return RELAXED_READ_WEAK_FIELD(*this, OffsetOfElementAt(index));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return Get(isolate, index);
+}
+
+MaybeObject WeakArrayList::Get(Isolate* isolate, int index) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(capacity()));
+ return TaggedField<MaybeObject>::Relaxed_Load(isolate, *this,
+ OffsetOfElementAt(index));
}
void WeakArrayList::Set(int index, MaybeObject value, WriteBarrierMode mode) {
@@ -478,6 +496,10 @@ Object ArrayList::Get(int index) const {
return FixedArray::cast(*this).get(kFirstIndex + index);
}
+Object ArrayList::Get(Isolate* isolate, int index) const {
+ return FixedArray::cast(*this).get(isolate, kFirstIndex + index);
+}
+
ObjectSlot ArrayList::Slot(int index) {
return RawField(OffsetOfElementAt(kFirstIndex + index));
}
@@ -538,6 +560,16 @@ void ByteArray::set_uint32(int index, uint32_t value) {
WriteField<uint32_t>(kHeaderSize + index * kUInt32Size, value);
}
+uint32_t ByteArray::get_uint32_relaxed(int index) const {
+ DCHECK(index >= 0 && index < this->length() / kUInt32Size);
+ return RELAXED_READ_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size);
+}
+
+void ByteArray::set_uint32_relaxed(int index, uint32_t value) {
+ DCHECK(index >= 0 && index < this->length() / kUInt32Size);
+ RELAXED_WRITE_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size, value);
+}
+
void ByteArray::clear_padding() {
int data_size = length() + kHeaderSize;
memset(reinterpret_cast<void*>(address() + data_size), 0, Size() - data_size);
@@ -589,6 +621,10 @@ Object TemplateList::get(int index) const {
return FixedArray::cast(*this).get(kFirstElementIndex + index);
}
+Object TemplateList::get(Isolate* isolate, int index) const {
+ return FixedArray::cast(*this).get(isolate, kFirstElementIndex + index);
+}
+
void TemplateList::set(int index, Object value) {
FixedArray::cast(*this).set(kFirstElementIndex + index, value);
}
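
The FixedArray accessors above now route their offset math through an OffsetOfElementAt-style helper instead of repeating "kHeaderSize + index * kTaggedSize" at each call site. A standalone sketch of that arithmetic with illustrative constants (not V8's actual sizes):

// Standalone sketch (not V8 code): element i of a tagged array lives at the
// header size plus i times the tagged slot size.
#include <cassert>

constexpr int kHeaderSize = 16;  // illustrative values only
constexpr int kTaggedSize = 8;

constexpr int OffsetOfElementAt(int index) {
  return kHeaderSize + index * kTaggedSize;
}

static_assert(OffsetOfElementAt(0) == 16, "first element follows the header");
static_assert(OffsetOfElementAt(3) == 40, "16 + 3 * 8");

int main() {
  assert(OffsetOfElementAt(1) - OffsetOfElementAt(0) == kTaggedSize);
  return 0;
}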
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 02f26502b2..ca6f06e83c 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -72,16 +72,15 @@ enum FixedArraySubInstanceType {
class FixedArrayBase : public HeapObject {
public:
// [length]: length of the array.
- inline int length() const;
- inline void set_length(int value);
+ DECL_INT_ACCESSORS(length)
// Get and set the length using acquire loads and release stores.
- inline int synchronized_length() const;
- inline void synchronized_set_length(int value);
+ DECL_SYNCHRONIZED_INT_ACCESSORS(length)
inline Object unchecked_synchronized_length() const;
DECL_CAST(FixedArrayBase)
+ DECL_VERIFIER(FixedArrayBase)
static int GetMaxLengthForNewSpaceAllocation(ElementsKind kind);
@@ -113,6 +112,8 @@ class FixedArray : public FixedArrayBase {
public:
// Setter and getter for elements.
inline Object get(int index) const;
+ inline Object get(Isolate* isolate, int index) const;
+
static inline Handle<Object> get(FixedArray array, int index,
Isolate* isolate);
@@ -267,6 +268,7 @@ class WeakFixedArray : public HeapObject {
DECL_CAST(WeakFixedArray)
inline MaybeObject Get(int index) const;
+ inline MaybeObject Get(Isolate* isolate, int index) const;
// Setter that uses write barrier.
inline void Set(int index, MaybeObject value);
@@ -281,8 +283,7 @@ class WeakFixedArray : public HeapObject {
DECL_INT_ACCESSORS(length)
// Get and set the length using acquire loads and release stores.
- inline int synchronized_length() const;
- inline void synchronized_set_length(int value);
+ DECL_SYNCHRONIZED_INT_ACCESSORS(length)
// Gives access to raw memory which stores the array's data.
inline MaybeObjectSlot data_start();
@@ -336,6 +337,7 @@ class WeakArrayList : public HeapObject {
const MaybeObjectHandle& value);
inline MaybeObject Get(int index) const;
+ inline MaybeObject Get(Isolate* isolate, int index) const;
// Set the element at index to obj. The underlying array must be large enough.
// If you need to grow the WeakArrayList, use the static AddToEnd() method
@@ -359,19 +361,12 @@ class WeakArrayList : public HeapObject {
DECL_INT_ACCESSORS(length)
// Get and set the capacity using acquire loads and release stores.
- inline int synchronized_capacity() const;
- inline void synchronized_set_capacity(int value);
-
+ DECL_SYNCHRONIZED_INT_ACCESSORS(capacity)
// Layout description.
-#define WEAK_ARRAY_LIST_FIELDS(V) \
- V(kCapacityOffset, kTaggedSize) \
- V(kLengthOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WEAK_ARRAY_LIST_FIELDS)
-#undef WEAK_ARRAY_LIST_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WEAK_ARRAY_LIST_FIELDS)
+ static constexpr int kHeaderSize = kSize;
using BodyDescriptor = WeakArrayBodyDescriptor;
@@ -442,6 +437,7 @@ class ArrayList : public FixedArray {
// storage capacity, i.e., length().
inline void SetLength(int length);
inline Object Get(int index) const;
+ inline Object Get(Isolate* isolate, int index) const;
inline ObjectSlot Slot(int index);
// Set the element at index to obj. The underlying array must be large enough.
@@ -492,6 +488,9 @@ class ByteArray : public FixedArrayBase {
inline uint32_t get_uint32(int index) const;
inline void set_uint32(int index, uint32_t value);
+ inline uint32_t get_uint32_relaxed(int index) const;
+ inline void set_uint32_relaxed(int index, uint32_t value);
+
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
inline void clear_padding();
@@ -552,9 +551,9 @@ class PodArray : public ByteArray {
static Handle<PodArray<T>> New(
Isolate* isolate, int length,
AllocationType allocation = AllocationType::kYoung);
- void copy_out(int index, T* result) {
+ void copy_out(int index, T* result, int length) {
ByteArray::copy_out(index * sizeof(T), reinterpret_cast<byte*>(result),
- sizeof(T));
+ length * sizeof(T));
}
void copy_in(int index, const T* buffer, int length) {
@@ -562,9 +561,14 @@ class PodArray : public ByteArray {
length * sizeof(T));
}
+ bool matches(const T* buffer, int length) {
+ DCHECK_LE(length, this->length());
+ return memcmp(GetDataStartAddress(), buffer, length * sizeof(T)) == 0;
+ }
+
T get(int index) {
T result;
- copy_out(index, &result);
+ copy_out(index, &result, 1);
return result;
}
@@ -581,6 +585,7 @@ class TemplateList : public FixedArray {
static Handle<TemplateList> New(Isolate* isolate, int size);
inline int length() const;
inline Object get(int index) const;
+ inline Object get(Isolate* isolate, int index) const;
inline void set(int index, Object value);
static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
Handle<Object> value);
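
The PodArray changes above give copy_out an explicit element count and add a matches() that compares raw bytes. A standalone sketch of a byte-backed POD array with those operations, expressed with plain memcpy/memcmp; the class and its layout are illustrative, not V8's.

// Standalone sketch (not V8 code): bulk copy_in / copy_out(length) / matches
// over a byte buffer of plain-old-data elements.
#include <cassert>
#include <cstring>
#include <vector>

template <typename T>
class PodBuffer {
 public:
  explicit PodBuffer(int length) : bytes_(length * sizeof(T)) {}

  void copy_in(int index, const T* buffer, int length) {
    std::memcpy(bytes_.data() + index * sizeof(T), buffer, length * sizeof(T));
  }
  void copy_out(int index, T* result, int length) const {
    std::memcpy(result, bytes_.data() + index * sizeof(T), length * sizeof(T));
  }
  bool matches(const T* buffer, int length) const {
    return std::memcmp(bytes_.data(), buffer, length * sizeof(T)) == 0;
  }

 private:
  std::vector<unsigned char> bytes_;
};

int main() {
  PodBuffer<int> array(3);
  int values[3] = {7, 8, 9};
  array.copy_in(0, values, 3);
  int out[2];
  array.copy_out(1, out, 2);  // bulk copy, no per-element loop
  assert(out[0] == 8 && out[1] == 9);
  assert(array.matches(values, 3));
  return 0;
}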
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
index bea8257515..848b8202ae 100644
--- a/deps/v8/src/objects/free-space-inl.h
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -26,29 +26,14 @@ RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
int FreeSpace::Size() { return size(); }
FreeSpace FreeSpace::next() {
-#ifdef DEBUG
- Heap* heap = GetHeapFromWritableObject(*this);
- Object free_space_map =
- Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
- DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
- !heap->deserialization_complete() &&
- map_slot().contains_value(kNullAddress));
-#endif
- DCHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
- return FreeSpace::unchecked_cast(*ObjectSlot(address() + kNextOffset));
+ DCHECK(IsValid());
+ return FreeSpace::unchecked_cast(
+ TaggedField<Object, kNextOffset>::load(*this));
}
void FreeSpace::set_next(FreeSpace next) {
-#ifdef DEBUG
- Heap* heap = GetHeapFromWritableObject(*this);
- Object free_space_map =
- Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
- DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
- !heap->deserialization_complete() &&
- map_slot().contains_value(kNullAddress));
-#endif
- DCHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
- ObjectSlot(address() + kNextOffset).Relaxed_Store(next);
+ DCHECK(IsValid());
+ RELAXED_WRITE_FIELD(*this, kNextOffset, next);
}
FreeSpace FreeSpace::cast(HeapObject o) {
@@ -61,6 +46,17 @@ FreeSpace FreeSpace::unchecked_cast(const Object o) {
return bit_cast<FreeSpace>(o);
}
+bool FreeSpace::IsValid() {
+ Heap* heap = GetHeapFromWritableObject(*this);
+ Object free_space_map =
+ Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
+ CHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
+ !heap->deserialization_complete() &&
+ map_slot().contains_value(kNullAddress));
+ CHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
+ return true;
+}
+
} // namespace internal
} // namespace v8
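
The FreeSpace hunk above folds two duplicated #ifdef DEBUG blocks into one IsValid() helper that checks invariants and returns true, so the call sites can simply write DCHECK(IsValid()). A standalone sketch of that pattern using assert as a stand-in for V8's CHECK/DCHECK macros; the struct and invariant are made up for illustration.

// Standalone sketch (not V8 code): a validity helper that asserts invariants
// and returns true, invoked only from debug-time assertions.
#include <cassert>

struct Node {
  Node* next;
  int size;
};

bool IsValid(const Node& n) {
  assert(n.size >= static_cast<int>(sizeof(Node)));  // invariant check
  return true;  // always true; only the asserts matter
}

void SetNext(Node& n, Node* next) {
  assert(IsValid(n));  // compiled out entirely in release builds
  n.next = next;
}

int main() {
  Node a{nullptr, static_cast<int>(sizeof(Node))};
  Node b{nullptr, static_cast<int>(sizeof(Node))};
  SetNext(a, &b);
  assert(a.next == &b);
  return 0;
}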
diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h
index 38f5794646..5714727036 100644
--- a/deps/v8/src/objects/free-space.h
+++ b/deps/v8/src/objects/free-space.h
@@ -44,6 +44,9 @@ class FreeSpace : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_FREE_SPACE_FIELDS)
+ private:
+ inline bool IsValid();
+
OBJECT_CONSTRUCTORS(FreeSpace, HeapObject);
};
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 77453721ae..b807851d85 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -71,14 +71,19 @@ void EphemeronHashTable::set_key(int index, Object value,
}
int HashTableBase::NumberOfElements() const {
- return Smi::ToInt(get(kNumberOfElementsIndex));
+ int offset = OffsetOfElementAt(kNumberOfElementsIndex);
+ return TaggedField<Smi>::load(*this, offset).value();
}
int HashTableBase::NumberOfDeletedElements() const {
- return Smi::ToInt(get(kNumberOfDeletedElementsIndex));
+ int offset = OffsetOfElementAt(kNumberOfDeletedElementsIndex);
+ return TaggedField<Smi>::load(*this, offset).value();
}
-int HashTableBase::Capacity() const { return Smi::ToInt(get(kCapacityIndex)); }
+int HashTableBase::Capacity() const {
+ int offset = OffsetOfElementAt(kCapacityIndex);
+ return TaggedField<Smi>::load(*this, offset).value();
+}
void HashTableBase::ElementAdded() {
SetNumberOfElements(NumberOfElements() + 1);
@@ -165,6 +170,15 @@ bool HashTable<Derived, Shape>::ToKey(ReadOnlyRoots roots, int entry,
}
template <typename Derived, typename Shape>
+bool HashTable<Derived, Shape>::ToKey(Isolate* isolate, int entry,
+ Object* out_k) {
+ Object k = KeyAt(isolate, entry);
+ if (!IsKey(GetReadOnlyRoots(isolate), k)) return false;
+ *out_k = Shape::Unwrap(k);
+ return true;
+}
+
+template <typename Derived, typename Shape>
void HashTable<Derived, Shape>::set_key(int index, Object value) {
DCHECK(!IsEphemeronHashTable());
FixedArray::set(index, value);
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 610dc9d28e..54d8ce0d2a 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -160,9 +160,16 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
static bool IsKey(ReadOnlyRoots roots, Object k);
inline bool ToKey(ReadOnlyRoots roots, int entry, Object* out_k);
+ inline bool ToKey(Isolate* isolate, int entry, Object* out_k);
// Returns the key at entry.
- Object KeyAt(int entry) { return get(EntryToIndex(entry) + kEntryKeyIndex); }
+ Object KeyAt(int entry) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return KeyAt(isolate, entry);
+ }
+ Object KeyAt(Isolate* isolate, int entry) {
+ return get(isolate, EntryToIndex(entry) + kEntryKeyIndex);
+ }
static const int kElementsStartIndex = kPrefixStartIndex + Shape::kPrefixSize;
static const int kEntrySize = Shape::kEntrySize;
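
The new KeyAt overloads above rely on the table's flat layout: entry n starts at kElementsStartIndex + n * kEntrySize, and the key occupies a fixed slot within each entry. A standalone sketch of that entry-to-index arithmetic with made-up constants, not V8's actual values:

// Standalone sketch (not V8 code): entries are fixed-size groups in one flat
// array, with the key stored at a fixed slot inside each group.
#include <cassert>
#include <vector>

constexpr int kElementsStartIndex = 3;  // bookkeeping slots come first
constexpr int kEntrySize = 2;           // e.g. key + value
constexpr int kEntryKeyIndex = 0;       // key is the first slot of an entry

constexpr int EntryToIndex(int entry) {
  return kElementsStartIndex + entry * kEntrySize;
}

int KeyAt(const std::vector<int>& table, int entry) {
  return table[EntryToIndex(entry) + kEntryKeyIndex];
}

int main() {
  //                        |-- header -| |entry 0| |entry 1|
  std::vector<int> table = {2, 0, 4,      10, 100,  20, 200};
  assert(KeyAt(table, 0) == 10);
  assert(KeyAt(table, 1) == 20);
  return 0;
}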
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
index 3986e9146c..3d70d71c89 100644
--- a/deps/v8/src/objects/heap-number-inl.h
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -31,11 +31,11 @@ void HeapNumberBase::set_value(double value) {
uint64_t HeapNumberBase::value_as_bits() const {
// Bug(v8:8875): HeapNumber's double may be unaligned.
- return ReadUnalignedValue<uint64_t>(field_address(kValueOffset));
+ return base::ReadUnalignedValue<uint64_t>(field_address(kValueOffset));
}
void HeapNumberBase::set_value_as_bits(uint64_t bits) {
- WriteUnalignedValue<uint64_t>(field_address(kValueOffset), bits);
+ base::WriteUnalignedValue<uint64_t>(field_address(kValueOffset), bits);
}
int HeapNumberBase::get_exponent() {
diff --git a/deps/v8/src/objects/heap-object-inl.h b/deps/v8/src/objects/heap-object-inl.h
index 3d5deeff63..88c0011bdf 100644
--- a/deps/v8/src/objects/heap-object-inl.h
+++ b/deps/v8/src/objects/heap-object-inl.h
@@ -7,10 +7,6 @@
#include "src/objects/heap-object.h"
-#include "src/heap/heap-write-barrier-inl.h"
-// TODO(jkummerow): Get rid of this by moving NROSO::GetIsolate elsewhere.
-#include "src/execution/isolate.h"
-
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -24,16 +20,6 @@ HeapObject::HeapObject(Address ptr, AllowInlineSmiStorage allow_smi)
IsHeapObject());
}
-// static
-Heap* NeverReadOnlySpaceObject::GetHeap(const HeapObject object) {
- return GetHeapFromWritableObject(object);
-}
-
-// static
-Isolate* NeverReadOnlySpaceObject::GetIsolate(const HeapObject object) {
- return Isolate::FromHeap(GetHeap(object));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index 9ca51bdda1..ad5475c9e8 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -9,6 +9,7 @@
#include "src/roots/roots.h"
#include "src/objects/objects.h"
+#include "src/objects/tagged-field.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -22,27 +23,30 @@ class Heap;
// objects.
class HeapObject : public Object {
public:
- bool is_null() const { return ptr() == kNullAddress; }
+ bool is_null() const {
+ return static_cast<Tagged_t>(ptr()) == static_cast<Tagged_t>(kNullAddress);
+ }
// [map]: Contains a map which contains the object's reflective
// information.
- inline Map map() const;
+ DECL_GETTER(map, Map)
inline void set_map(Map value);
- inline MapWordSlot map_slot() const;
+ inline ObjectSlot map_slot() const;
// The no-write-barrier version. This is OK if the object is white and in
// new space, or if the value is an immortal immutable object, like the maps
// of primitive (non-JS) objects like strings, heap numbers etc.
inline void set_map_no_write_barrier(Map value);
- // Get the map using acquire load.
- inline Map synchronized_map() const;
- inline MapWord synchronized_map_word() const;
-
- // Set the map using release store
+ // Access the map using acquire load and release store.
+ DECL_GETTER(synchronized_map, Map)
inline void synchronized_set_map(Map value);
- inline void synchronized_set_map_word(MapWord map_word);
+
+ // Compare-and-swaps map word using release store, returns true if the map
+ // word was actually swapped.
+ inline bool synchronized_compare_and_swap_map_word(MapWord old_map_word,
+ MapWord new_map_word);
// Initialize the map immediately after the object is allocated.
// Do not use this outside Heap.
@@ -51,18 +55,29 @@ class HeapObject : public Object {
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
- inline MapWord map_word() const;
+ DECL_GETTER(map_word, MapWord)
inline void set_map_word(MapWord map_word);
+ // Access the map word using acquire load and release store.
+ DECL_GETTER(synchronized_map_word, MapWord)
+ inline void synchronized_set_map_word(MapWord map_word);
+
// TODO(v8:7464): Once RO_SPACE is shared between isolates, this method can be
// removed as ReadOnlyRoots will be accessible from a global variable. For now
// this method exists to help remove GetIsolate/GetHeap from HeapObject, in a
// way that doesn't require passing Isolate/Heap down huge call chains or to
// places where it might not be safe to access it.
inline ReadOnlyRoots GetReadOnlyRoots() const;
+  // This version is intended to be used with isolate values produced by the
+  // i::GetIsolateForPtrCompr(HeapObject) function, which may return nullptr.
+ inline ReadOnlyRoots GetReadOnlyRoots(Isolate* isolate) const;
-#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
+#define IS_TYPE_FUNCTION_DECL(Type) \
+ V8_INLINE bool Is##Type() const; \
+ V8_INLINE bool Is##Type(Isolate* isolate) const;
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+ IS_TYPE_FUNCTION_DECL(HashTableBase)
+ IS_TYPE_FUNCTION_DECL(SmallOrderedHashTable)
#undef IS_TYPE_FUNCTION_DECL
bool IsExternal(Isolate* isolate) const;
@@ -74,13 +89,12 @@ class HeapObject : public Object {
V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
V8_INLINE bool Is##Type() const;
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
+ IS_TYPE_FUNCTION_DECL(NullOrUndefined, /* unused */)
#undef IS_TYPE_FUNCTION_DECL
- V8_INLINE bool IsNullOrUndefined(Isolate* isolate) const;
- V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
- V8_INLINE bool IsNullOrUndefined() const;
-
-#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
+#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
+ V8_INLINE bool Is##Name() const; \
+ V8_INLINE bool Is##Name(Isolate* isolate) const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
@@ -189,6 +203,8 @@ class HeapObject : public Object {
STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset);
+ using MapField = TaggedField<MapWord, HeapObject::kMapOffset>;
+
inline Address GetFieldAddress(int field_offset) const;
protected:
@@ -203,16 +219,6 @@ class HeapObject : public Object {
OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object)
CAST_ACCESSOR(HeapObject)
-// Helper class for objects that can never be in RO space.
-class NeverReadOnlySpaceObject {
- public:
- // The Heap the object was allocated in. Used also to access Isolate.
- static inline Heap* GetHeap(const HeapObject object);
-
- // Convenience method to get current isolate.
- static inline Isolate* GetIsolate(const HeapObject object);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index 559ed34784..79c953aa87 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -11,6 +11,8 @@
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
+#include "torque-generated/instance-types-tq.h"
+
namespace v8 {
namespace internal {
@@ -32,11 +34,16 @@ enum StringRepresentationTag {
};
const uint32_t kIsIndirectStringMask = 1 << 0;
const uint32_t kIsIndirectStringTag = 1 << 0;
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0);
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0);
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kSlicedStringTag & kIsIndirectStringMask) ==
kIsIndirectStringTag);
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
// For strings, bit 3 indicates whether the string consists of two-byte
@@ -141,6 +148,7 @@ enum InstanceType : uint16_t {
ACCESSOR_PAIR_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
ALLOCATION_MEMENTO_TYPE,
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
ASM_WASM_DATA_TYPE,
ASYNC_GENERATOR_REQUEST_TYPE,
CLASS_POSITIONS_TYPE,
@@ -150,24 +158,23 @@ enum InstanceType : uint16_t {
FUNCTION_TEMPLATE_RARE_DATA_TYPE,
INTERCEPTOR_INFO_TYPE,
INTERPRETER_DATA_TYPE,
- MODULE_INFO_ENTRY_TYPE,
- MODULE_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
PROMISE_CAPABILITY_TYPE,
PROMISE_REACTION_TYPE,
PROTOTYPE_INFO_TYPE,
SCRIPT_TYPE,
SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE,
+ SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE,
STACK_FRAME_INFO_TYPE,
STACK_TRACE_FRAME_TYPE,
TEMPLATE_OBJECT_DESCRIPTION_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
- ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
WASM_CAPI_FUNCTION_DATA_TYPE,
WASM_DEBUG_INFO_TYPE,
WASM_EXCEPTION_TAG_TYPE,
WASM_EXPORTED_FUNCTION_DATA_TYPE,
+ WASM_INDIRECT_FUNCTION_TABLE_TYPE,
WASM_JS_FUNCTION_DATA_TYPE,
CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
@@ -177,6 +184,14 @@ enum InstanceType : uint16_t {
PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
+#define MAKE_TORQUE_INSTANCE_TYPE(V) V,
+ TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_INSTANCE_TYPE)
+#undef MAKE_TORQUE_INSTANCE_TYPE
+
+ // Modules
+ SOURCE_TEXT_MODULE_TYPE, // FIRST_MODULE_TYPE
+ SYNTHETIC_MODULE_TYPE, // LAST_MODULE_TYPE
+
ALLOCATION_SITE_TYPE,
EMBEDDER_DATA_ARRAY_TYPE,
// FixedArrays.
@@ -246,7 +261,7 @@ enum InstanceType : uint16_t {
// Like JS_API_OBJECT_TYPE, but requires access checks and/or has
// interceptors.
JS_SPECIAL_API_OBJECT_TYPE = 0x0410, // LAST_SPECIAL_RECEIVER_TYPE
- JS_VALUE_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
+ JS_PRIMITIVE_WRAPPER_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
// Like JS_OBJECT_TYPE, but created from API function.
JS_API_OBJECT_TYPE = 0x0420,
JS_OBJECT_TYPE,
@@ -332,6 +347,9 @@ enum InstanceType : uint16_t {
// Boundaries for testing if given HeapObject is a subclass of Microtask.
FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
LAST_MICROTASK_TYPE = FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE,
+ // Boundaries of module record types
+ FIRST_MODULE_TYPE = SOURCE_TEXT_MODULE_TYPE,
+ LAST_MODULE_TYPE = SYNTHETIC_MODULE_TYPE,
// Boundary for promotion to old space.
LAST_DATA_TYPE = FILLER_TYPE,
// Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
@@ -349,7 +367,7 @@ enum InstanceType : uint16_t {
// Boundary case for testing JSReceivers that may have elements while having
// an empty fixed array as elements backing store. This is true for string
// wrappers.
- LAST_CUSTOM_ELEMENTS_RECEIVER = JS_VALUE_TYPE,
+ LAST_CUSTOM_ELEMENTS_RECEIVER = JS_PRIMITIVE_WRAPPER_TYPE,
FIRST_SET_ITERATOR_TYPE = JS_SET_KEY_VALUE_ITERATOR_TYPE,
LAST_SET_ITERATOR_TYPE = JS_SET_VALUE_ITERATOR_TYPE,
@@ -364,6 +382,7 @@ enum InstanceType : uint16_t {
constexpr InstanceType LAST_STRING_TYPE =
static_cast<InstanceType>(FIRST_NONSTRING_TYPE - 1);
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((FIRST_NONSTRING_TYPE & kIsNotStringMask) != kStringTag);
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
STATIC_ASSERT(JS_API_OBJECT_TYPE == Internals::kJSApiObjectType);
@@ -420,12 +439,16 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(JSDataView, JS_DATA_VIEW_TYPE) \
V(JSDate, JS_DATE_TYPE) \
V(JSError, JS_ERROR_TYPE) \
+ V(JSFinalizationGroup, JS_FINALIZATION_GROUP_TYPE) \
+ V(JSFinalizationGroupCleanupIterator, \
+ JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE) \
V(JSFunction, JS_FUNCTION_TYPE) \
V(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE) \
V(JSGlobalProxy, JS_GLOBAL_PROXY_TYPE) \
V(JSMap, JS_MAP_TYPE) \
V(JSMessageObject, JS_MESSAGE_OBJECT_TYPE) \
V(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE) \
+ V(JSPrimitiveWrapper, JS_PRIMITIVE_WRAPPER_TYPE) \
V(JSPromise, JS_PROMISE_TYPE) \
V(JSProxy, JS_PROXY_TYPE) \
V(JSRegExp, JS_REGEXP_TYPE) \
@@ -434,10 +457,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(JSSet, JS_SET_TYPE) \
V(JSStringIterator, JS_STRING_ITERATOR_TYPE) \
V(JSTypedArray, JS_TYPED_ARRAY_TYPE) \
- V(JSValue, JS_VALUE_TYPE) \
- V(JSFinalizationGroup, JS_FINALIZATION_GROUP_TYPE) \
- V(JSFinalizationGroupCleanupIterator, \
- JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE) \
V(JSWeakMap, JS_WEAK_MAP_TYPE) \
V(JSWeakRef, JS_WEAK_REF_TYPE) \
V(JSWeakSet, JS_WEAK_SET_TYPE) \
@@ -462,9 +481,11 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE) \
V(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE) \
V(SmallOrderedNameDictionary, SMALL_ORDERED_NAME_DICTIONARY_TYPE) \
+ V(SourceTextModule, SOURCE_TEXT_MODULE_TYPE) \
V(StoreHandler, STORE_HANDLER_TYPE) \
V(StringTable, STRING_TABLE_TYPE) \
V(Symbol, SYMBOL_TYPE) \
+ V(SyntheticModule, SYNTHETIC_MODULE_TYPE) \
V(TransitionArray, TRANSITION_ARRAY_TYPE) \
V(UncompiledDataWithoutPreparseData, \
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \
@@ -505,6 +526,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(JSMapIterator, FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE) \
V(JSSetIterator, FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE) \
V(Microtask, FIRST_MICROTASK_TYPE, LAST_MICROTASK_TYPE) \
+ V(Module, FIRST_MODULE_TYPE, LAST_MODULE_TYPE) \
V(Name, FIRST_NAME_TYPE, LAST_NAME_TYPE) \
V(String, FIRST_STRING_TYPE, LAST_STRING_TYPE) \
V(WeakFixedArray, FIRST_WEAK_FIXED_ARRAY_TYPE, LAST_WEAK_FIXED_ARRAY_TYPE)
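
The FIRST_MODULE_TYPE / LAST_MODULE_TYPE boundaries added above follow the file's existing pattern: contiguous enumerators plus boundary aliases make the "is it in this family?" test a single range comparison. A standalone sketch with made-up enumerators (not V8's instance types):

// Standalone sketch (not V8 code): FIRST_/LAST_ boundary aliases turn a
// family-membership check into one range comparison.
#include <cassert>

enum InstanceType : unsigned short {
  ALLOCATION_SITE_TYPE,
  SOURCE_TEXT_MODULE_TYPE,
  SYNTHETIC_MODULE_TYPE,
  FIRST_MODULE_TYPE = SOURCE_TEXT_MODULE_TYPE,
  LAST_MODULE_TYPE = SYNTHETIC_MODULE_TYPE,
};

constexpr bool IsModule(InstanceType type) {
  return type >= FIRST_MODULE_TYPE && type <= LAST_MODULE_TYPE;
}

int main() {
  assert(IsModule(SOURCE_TEXT_MODULE_TYPE));
  assert(IsModule(SYNTHETIC_MODULE_TYPE));
  assert(!IsModule(ALLOCATION_SITE_TYPE));
  return 0;
}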
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index f2bc87ebac..dbf212aaf8 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -34,9 +34,11 @@
#include "unicode/formattedvalue.h"
#include "unicode/locid.h"
#include "unicode/normalizer2.h"
+#include "unicode/numberformatter.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
#include "unicode/timezone.h"
+#include "unicode/ures.h"
#include "unicode/ustring.h"
#include "unicode/uvernum.h" // U_ICU_VERSION_MAJOR_NUM
@@ -52,9 +54,8 @@ namespace v8 {
namespace internal {
namespace {
-inline bool IsASCIIUpper(uint16_t ch) { return ch >= 'A' && ch <= 'Z'; }
-const uint8_t kToLower[256] = {
+constexpr uint8_t kToLower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23,
@@ -79,20 +80,17 @@ const uint8_t kToLower[256] = {
0xFC, 0xFD, 0xFE, 0xFF,
};
-inline uint16_t ToLatin1Lower(uint16_t ch) {
+inline constexpr uint16_t ToLatin1Lower(uint16_t ch) {
return static_cast<uint16_t>(kToLower[ch]);
}
-inline uint16_t ToASCIIUpper(uint16_t ch) {
- return ch & ~((ch >= 'a' && ch <= 'z') << 5);
-}
-
// Does not work for U+00DF (sharp-s), U+00B5 (micron), U+00FF.
-inline uint16_t ToLatin1Upper(uint16_t ch) {
+inline constexpr uint16_t ToLatin1Upper(uint16_t ch) {
+#if V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
+#endif
return ch &
- ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xF7))
- << 5);
+ ~((IsAsciiLower(ch) || (((ch & 0xE0) == 0xE0) && ch != 0xF7)) << 5);
}
template <typename Char>
@@ -104,7 +102,7 @@ bool ToUpperFastASCII(const Vector<const Char>& src,
for (auto it = src.begin(); it != src.end(); ++it) {
uint16_t ch = static_cast<uint16_t>(*it);
ored |= ch;
- result->SeqOneByteStringSet(index++, ToASCIIUpper(ch));
+ result->SeqOneByteStringSet(index++, ToAsciiUpper(ch));
}
return !(ored & ~0x7F);
}
@@ -155,7 +153,7 @@ void ToUpperWithSharpS(const Vector<const Char>& src,
inline int FindFirstUpperOrNonAscii(String s, int length) {
for (int index = 0; index < length; ++index) {
uint16_t ch = s.Get(index);
- if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
+ if (V8_UNLIKELY(IsAsciiUpper(ch) || ch & ~0x7F)) {
return index;
}
}
@@ -180,12 +178,11 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
template <typename T>
MaybeHandle<T> New(Isolate* isolate, Handle<JSFunction> constructor,
Handle<Object> locales, Handle<Object> options) {
- Handle<JSObject> result;
+ Handle<Map> map;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- JSObject::New(constructor, constructor, Handle<AllocationSite>::null()),
- T);
- return T::Initialize(isolate, Handle<T>::cast(result), locales, options);
+ isolate, map,
+ JSFunction::GetDerivedMap(isolate, constructor, constructor), T);
+ return T::New(isolate, map, locales, options);
}
} // namespace
@@ -212,6 +209,24 @@ icu::UnicodeString Intl::ToICUUnicodeString(Isolate* isolate,
return icu::UnicodeString(uchar_buffer, length);
}
+icu::StringPiece Intl::ToICUStringPiece(Isolate* isolate,
+ Handle<String> string) {
+ DCHECK(string->IsFlat());
+ DisallowHeapAllocation no_gc;
+
+ const String::FlatContent& flat = string->GetFlatContent(no_gc);
+ if (!flat.IsOneByte()) return icu::StringPiece(nullptr, 0);
+
+ int32_t length = string->length();
+ const char* char_buffer =
+ reinterpret_cast<const char*>(flat.ToOneByteVector().begin());
+ if (!String::IsAscii(char_buffer, length)) {
+ return icu::StringPiece(nullptr, 0);
+ }
+
+ return icu::StringPiece(char_buffer, length);
+}
+
namespace {
MaybeHandle<String> LocaleConvertCase(Isolate* isolate, Handle<String> s,
bool is_to_upper, const char* lang) {
@@ -506,23 +521,59 @@ bool RemoveLocaleScriptTag(const std::string& icu_locale,
return true;
}
+bool ValidateResource(const icu::Locale locale, const char* path,
+ const char* key) {
+ bool result = false;
+ UErrorCode status = U_ZERO_ERROR;
+ UResourceBundle* bundle = ures_open(path, locale.getName(), &status);
+ if (bundle != nullptr && status == U_ZERO_ERROR) {
+ if (key == nullptr) {
+ result = true;
+ } else {
+ UResourceBundle* key_bundle =
+ ures_getByKey(bundle, key, nullptr, &status);
+ result = key_bundle != nullptr && (status == U_ZERO_ERROR);
+ ures_close(key_bundle);
+ }
+ }
+ ures_close(bundle);
+ if (!result) {
+ if ((locale.getCountry()[0] != '\0') && (locale.getScript()[0] != '\0')) {
+ // Fallback to try without country.
+ std::string without_country(locale.getLanguage());
+ without_country = without_country.append("-").append(locale.getScript());
+ return ValidateResource(without_country.c_str(), path, key);
+ } else if ((locale.getCountry()[0] != '\0') ||
+ (locale.getScript()[0] != '\0')) {
+ // Fallback to try with only language.
+ std::string language(locale.getLanguage());
+ return ValidateResource(language.c_str(), path, key);
+ }
+ }
+ return result;
+}
+
} // namespace
std::set<std::string> Intl::BuildLocaleSet(
- const icu::Locale* icu_available_locales, int32_t count) {
+ const icu::Locale* icu_available_locales, int32_t count, const char* path,
+ const char* validate_key) {
std::set<std::string> locales;
for (int32_t i = 0; i < count; ++i) {
std::string locale =
Intl::ToLanguageTag(icu_available_locales[i]).FromJust();
+ if (path != nullptr || validate_key != nullptr) {
+ if (!ValidateResource(icu_available_locales[i], path, validate_key)) {
+ continue;
+ }
+ }
locales.insert(locale);
-
std::string shortened_locale;
if (RemoveLocaleScriptTag(locale, &shortened_locale)) {
std::replace(shortened_locale.begin(), shortened_locale.end(), '_', '-');
locales.insert(shortened_locale);
}
}
-
return locales;
}
@@ -683,19 +734,10 @@ V8_WARN_UNUSED_RESULT Maybe<bool> Intl::GetBoolOption(
namespace {
-char AsciiToLower(char c) {
- if (c < 'A' || c > 'Z') {
- return c;
- }
- return c | (1 << 5);
-}
-
-bool IsLowerAscii(char c) { return c >= 'a' && c < 'z'; }
-
bool IsTwoLetterLanguage(const std::string& locale) {
// Two letters, both in range 'a'-'z'...
- return locale.length() == 2 && IsLowerAscii(locale[0]) &&
- IsLowerAscii(locale[1]);
+ return locale.length() == 2 && IsAsciiLower(locale[0]) &&
+ IsAsciiLower(locale[1]);
}
bool IsDeprecatedLanguage(const std::string& locale) {
@@ -770,7 +812,7 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
// Because per BCP 47 2.1.1 language tags are case-insensitive, lowercase
// the input before any more check.
- std::transform(locale.begin(), locale.end(), locale.begin(), AsciiToLower);
+ std::transform(locale.begin(), locale.end(), locale.begin(), ToAsciiLower);
// ICU maps a few grandfathered tags to what looks like a regular language
// tag even though IANA language tag registry does not have a preferred
@@ -1020,6 +1062,16 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate,
UCollationResult result;
UErrorCode status = U_ZERO_ERROR;
+ icu::StringPiece string_piece1 = Intl::ToICUStringPiece(isolate, string1);
+ if (!string_piece1.empty()) {
+ icu::StringPiece string_piece2 = Intl::ToICUStringPiece(isolate, string2);
+ if (!string_piece2.empty()) {
+ result = icu_collator.compareUTF8(string_piece1, string_piece2, status);
+ DCHECK(U_SUCCESS(status));
+ return factory->NewNumberFromInt(result);
+ }
+ }
+
icu::UnicodeString string_val1 = Intl::ToICUUnicodeString(isolate, string1);
icu::UnicodeString string_val2 = Intl::ToICUUnicodeString(isolate, string2);
result = icu_collator.compare(string_val1, string_val2, status);
@@ -1116,10 +1168,12 @@ Maybe<int> DefaultNumberOption(Isolate* isolate, Handle<Object> value, int min,
return Just(FastD2I(floor(value_num->Number())));
}
+} // namespace
+
// ecma402/#sec-getnumberoption
-Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
- Handle<String> property, int min, int max,
- int fallback) {
+Maybe<int> Intl::GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
+ Handle<String> property, int min, int max,
+ int fallback) {
// 1. Let value be ? Get(options, property).
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -1130,62 +1184,70 @@ Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
return DefaultNumberOption(isolate, value, min, max, fallback, property);
}
-Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
- const char* property, int min, int max,
- int fallback) {
- Handle<String> property_str =
- isolate->factory()->NewStringFromAsciiChecked(property);
- return GetNumberOption(isolate, options, property_str, min, max, fallback);
-}
-
-} // namespace
-
Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
Isolate* isolate, Handle<JSReceiver> options, int mnfd_default,
- int mxfd_default) {
+ int mxfd_default, bool notation_is_compact) {
+ Factory* factory = isolate->factory();
Intl::NumberFormatDigitOptions digit_options;
// 5. Let mnid be ? GetNumberOption(options, "minimumIntegerDigits,", 1, 21,
// 1).
- int mnid;
- if (!GetNumberOption(isolate, options, "minimumIntegerDigits", 1, 21, 1)
+ int mnid = 1;
+ if (!Intl::GetNumberOption(isolate, options,
+ factory->minimumIntegerDigits_string(), 1, 21, 1)
.To(&mnid)) {
return Nothing<NumberFormatDigitOptions>();
}
- // 6. Let mnfd be ? GetNumberOption(options, "minimumFractionDigits", 0, 20,
- // mnfdDefault).
- int mnfd;
- if (!GetNumberOption(isolate, options, "minimumFractionDigits", 0, 20,
- mnfd_default)
- .To(&mnfd)) {
- return Nothing<NumberFormatDigitOptions>();
- }
+ int mnfd = 0;
+ int mxfd = 0;
+ Handle<Object> mnfd_obj;
+ Handle<Object> mxfd_obj;
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // 6. Let mnfd be ? Get(options, "minimumFractionDigits").
+ Handle<String> mnfd_str = factory->minimumFractionDigits_string();
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, mnfd_obj, JSReceiver::GetProperty(isolate, options, mnfd_str),
+ Nothing<NumberFormatDigitOptions>());
+
+    // 8. Let mxfd be ? Get(options, "maximumFractionDigits").
+ Handle<String> mxfd_str = factory->maximumFractionDigits_string();
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, mxfd_obj, JSReceiver::GetProperty(isolate, options, mxfd_str),
+ Nothing<NumberFormatDigitOptions>());
+ } else {
+ // 6. Let mnfd be ? GetNumberOption(options, "minimumFractionDigits", 0, 20,
+ // mnfdDefault).
+ if (!Intl::GetNumberOption(isolate, options,
+ factory->minimumFractionDigits_string(), 0, 20,
+ mnfd_default)
+ .To(&mnfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
- // 7. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
- int mxfd_actual_default = std::max(mnfd, mxfd_default);
+ // 7. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
+ int mxfd_actual_default = std::max(mnfd, mxfd_default);
- // 8. Let mxfd be ? GetNumberOption(options,
- // "maximumFractionDigits", mnfd, 20, mxfdActualDefault).
- int mxfd;
- if (!GetNumberOption(isolate, options, "maximumFractionDigits", mnfd, 20,
- mxfd_actual_default)
- .To(&mxfd)) {
- return Nothing<NumberFormatDigitOptions>();
+ // 8. Let mxfd be ? GetNumberOption(options,
+ // "maximumFractionDigits", mnfd, 20, mxfdActualDefault).
+ if (!Intl::GetNumberOption(isolate, options,
+ factory->maximumFractionDigits_string(), mnfd,
+ 20, mxfd_actual_default)
+ .To(&mxfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
}
// 9. Let mnsd be ? Get(options, "minimumSignificantDigits").
Handle<Object> mnsd_obj;
- Handle<String> mnsd_str =
- isolate->factory()->minimumSignificantDigits_string();
+ Handle<String> mnsd_str = factory->minimumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, mnsd_obj, JSReceiver::GetProperty(isolate, options, mnsd_str),
Nothing<NumberFormatDigitOptions>());
// 10. Let mxsd be ? Get(options, "maximumSignificantDigits").
Handle<Object> mxsd_obj;
- Handle<String> mxsd_str =
- isolate->factory()->maximumSignificantDigits_string();
+ Handle<String> mxsd_str = factory->maximumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, mxsd_obj, JSReceiver::GetProperty(isolate, options, mxsd_str),
Nothing<NumberFormatDigitOptions>());
@@ -1222,8 +1284,50 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
} else {
digit_options.minimum_significant_digits = 0;
digit_options.maximum_significant_digits = 0;
- }
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // 15. Else If mnfd is not undefined or mxfd is not undefined, then
+ if (!mnfd_obj->IsUndefined(isolate) || !mxfd_obj->IsUndefined(isolate)) {
+ // 15. b. Let mnfd be ? DefaultNumberOption(mnfd, 0, 20, mnfdDefault).
+ Handle<String> mnfd_str = factory->minimumFractionDigits_string();
+ if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, mnfd_default,
+ mnfd_str)
+ .To(&mnfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
+
+ // 15. c. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
+ int mxfd_actual_default = std::max(mnfd, mxfd_default);
+
+ // 15. d. Let mxfd be ? DefaultNumberOption(mxfd, mnfd, 20,
+ // mxfdActualDefault).
+ Handle<String> mxfd_str = factory->maximumFractionDigits_string();
+ if (!DefaultNumberOption(isolate, mxfd_obj, mnfd, 20,
+ mxfd_actual_default, mxfd_str)
+ .To(&mxfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
+ // 15. e. Set intlObj.[[MinimumFractionDigits]] to mnfd.
+ digit_options.minimum_fraction_digits = mnfd;
+
+ // 15. f. Set intlObj.[[MaximumFractionDigits]] to mxfd.
+ digit_options.maximum_fraction_digits = mxfd;
+ // 16. Else if intlObj.[[Notation]] is "compact", then
+ } else if (notation_is_compact) {
+ // a. Set intlObj.[[RoundingType]] to "compact-rounding".
+ // Set minimum_significant_digits to -1 to indicate that the rounding type
+ // is "compact-rounding".
+ digit_options.minimum_significant_digits = -1;
+ // 17. Else,
+ } else {
+ // 17. b. Set intlObj.[[MinimumFractionDigits]] to mnfdDefault.
+ digit_options.minimum_fraction_digits = mnfd_default;
+
+ // 17. c. Set intlObj.[[MaximumFractionDigits]] to mxfdDefault.
+ digit_options.maximum_fraction_digits = mxfd_default;
+ }
+ }
+ }
return Just(digit_options);
}
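The compact-notation branch does not pick fraction or significant digits at all; it records a sentinel that later tells the formatter setup to leave ICU's compact rounding in place. A small self-contained sketch of that convention (struct and helper names are illustrative only):

// Mirror of the digit-option fields involved; -1 in the significant-digits
// minimum stands for the "compact-rounding" rounding type.
struct DigitOptionsSketch {
  int minimum_significant_digits = 0;
  int maximum_significant_digits = 0;
  int minimum_fraction_digits = 0;
  int maximum_fraction_digits = 0;
};

inline bool UsesCompactRounding(const DigitOptionsSketch& options) {
  return options.minimum_significant_digits < 0;
}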
@@ -1678,7 +1782,7 @@ Intl::ResolvedLocale Intl::ResolveLocale(
return Intl::ResolvedLocale{canonicalized_locale, icu_locale, extensions};
}
-Managed<icu::UnicodeString> Intl::SetTextToBreakIterator(
+Handle<Managed<icu::UnicodeString>> Intl::SetTextToBreakIterator(
Isolate* isolate, Handle<String> text, icu::BreakIterator* break_iterator) {
text = String::Flatten(isolate, text);
icu::UnicodeString* u_text =
@@ -1688,7 +1792,7 @@ Managed<icu::UnicodeString> Intl::SetTextToBreakIterator(
Managed<icu::UnicodeString>::FromRawPtr(isolate, 0, u_text);
break_iterator->setText(*u_text);
- return *new_u_text;
+ return new_u_text;
}
// ecma262 #sec-string.prototype.normalize
@@ -1927,8 +2031,18 @@ const std::set<std::string>& Intl::GetAvailableLocalesForLocale() {
return available_locales.Pointer()->Get();
}
+namespace {
+
+struct CheckCalendar {
+ static const char* key() { return "calendar"; }
+ static const char* path() { return nullptr; }
+};
+
+} // namespace
+
const std::set<std::string>& Intl::GetAvailableLocalesForDateFormat() {
- static base::LazyInstance<Intl::AvailableLocales<icu::DateFormat>>::type
+ static base::LazyInstance<
+ Intl::AvailableLocales<icu::DateFormat, CheckCalendar>>::type
available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
@@ -1966,16 +2080,17 @@ Handle<String> Intl::NumberFieldToType(Isolate* isolate,
: isolate->factory()->plusSign_string();
} else {
double number = numeric_obj->Number();
- return number < 0 ? isolate->factory()->minusSign_string()
- : isolate->factory()->plusSign_string();
+ return std::signbit(number) ? isolate->factory()->minusSign_string()
+ : isolate->factory()->plusSign_string();
}
case UNUM_EXPONENT_SYMBOL_FIELD:
+ return isolate->factory()->exponentSeparator_string();
+
case UNUM_EXPONENT_SIGN_FIELD:
+ return isolate->factory()->exponentMinusSign_string();
+
case UNUM_EXPONENT_FIELD:
- // We should never get these because we're not using any scientific
- // formatter.
- UNREACHABLE();
- return Handle<String>();
+ return isolate->factory()->exponentInteger_string();
case UNUM_PERMILL_FIELD:
// We're not creating any permill formatter, and it's not even clear how
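The sign change in NumberFieldToType above is only observable for negative zero: `number < 0` is false for -0.0, while std::signbit reports the sign bit. A tiny standalone illustration (plain C++, not part of the patch):

#include <cmath>
#include <cstdio>

int main() {
  double negative_zero = -0.0;
  std::printf("%d\n", negative_zero < 0 ? 1 : 0);           // 0: comparison misses -0
  std::printf("%d\n", std::signbit(negative_zero) ? 1 : 0);  // 1: signbit catches -0
  return 0;
}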
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 1274fa0549..4d4d3245fd 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -49,7 +49,8 @@ class Intl {
// script; e.g., pa_Guru_IN (language=Panjabi, script=Gurmukhi, country=India)
// would include pa_IN.
static std::set<std::string> BuildLocaleSet(
- const icu::Locale* icu_available_locales, int32_t count);
+ const icu::Locale* icu_available_locales, int32_t count, const char* path,
+ const char* validate_key);
static Maybe<std::string> ToLanguageTag(const icu::Locale& locale);
@@ -126,6 +127,10 @@ class Intl {
Isolate* isolate, Handle<JSReceiver> options, const char* property,
const char* service, bool* result);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<int> GetNumberOption(
+ Isolate* isolate, Handle<JSReceiver> options, Handle<String> property,
+ int min, int max, int fallback);
+
// Canonicalize the locale.
// https://tc39.github.io/ecma402/#sec-canonicalizelanguagetag,
// including type check and structural validity check.
@@ -180,7 +185,8 @@ class Intl {
};
V8_WARN_UNUSED_RESULT static Maybe<NumberFormatDigitOptions>
SetNumberFormatDigitOptions(Isolate* isolate, Handle<JSReceiver> options,
- int mnfd_default, int mxfd_default);
+ int mnfd_default, int mxfd_default,
+ bool notation_is_compact);
static icu::Locale CreateICULocale(const std::string& bcp47_locale);
@@ -277,20 +283,26 @@ class Intl {
// A helper template to implement the GetAvailableLocales
// Usage in src/objects/js-XXX.cc
- //
// const std::set<std::string>& JSXxx::GetAvailableLocales() {
// static base::LazyInstance<Intl::AvailableLocales<icu::YYY>>::type
// available_locales = LAZY_INSTANCE_INITIALIZER;
// return available_locales.Pointer()->Get();
// }
- template <typename T>
+
+ struct SkipResourceCheck {
+ static const char* key() { return nullptr; }
+ static const char* path() { return nullptr; }
+ };
+
+ template <typename T, typename C = SkipResourceCheck>
class AvailableLocales {
public:
AvailableLocales() {
int32_t num_locales = 0;
const icu::Locale* icu_available_locales =
T::getAvailableLocales(num_locales);
- set = Intl::BuildLocaleSet(icu_available_locales, num_locales);
+ set = Intl::BuildLocaleSet(icu_available_locales, num_locales, C::path(),
+ C::key());
}
virtual ~AvailableLocales() {}
const std::set<std::string>& Get() const { return set; }
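The template now takes an optional second parameter describing which ICU resource to probe before a locale is accepted; SkipResourceCheck keeps the old behaviour. A usage sketch in the style of the CheckCalendar/CheckColl/CheckListPattern structs elsewhere in this patch (CheckFoo, JSFoo, and icu::YYY are placeholders):

struct CheckFoo {
  static const char* key() { return "foo"; }     // resource key to look up
  static const char* path() { return nullptr; }  // nullptr = default ICU data
};

// const std::set<std::string>& JSFoo::GetAvailableLocales() {
//   static base::LazyInstance<
//       Intl::AvailableLocales<icu::YYY, CheckFoo>>::type available_locales =
//       LAZY_INSTANCE_INITIALIZER;
//   return available_locales.Pointer()->Get();
// }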
@@ -300,7 +312,7 @@ class Intl {
};
// Utility function to set text to BreakIterator.
- static Managed<icu::UnicodeString> SetTextToBreakIterator(
+ static Handle<Managed<icu::UnicodeString>> SetTextToBreakIterator(
Isolate* isolate, Handle<String> text,
icu::BreakIterator* break_iterator);
@@ -313,6 +325,10 @@ class Intl {
static icu::UnicodeString ToICUUnicodeString(Isolate* isolate,
Handle<String> string);
+ // Convert a Handle<String> to icu::StringPiece
+ static icu::StringPiece ToICUStringPiece(Isolate* isolate,
+ Handle<String> string);
+
static const uint8_t* ToLatin1LowerTable();
static String ConvertOneByteToLower(String src, String dst);
diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq
index 67d8537feb..d91df566c3 100644
--- a/deps/v8/src/objects/intl-objects.tq
+++ b/deps/v8/src/objects/intl-objects.tq
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include 'src/objects/js-break-iterator.h'
+#include 'src/objects/js-collator.h'
#include 'src/objects/js-number-format.h'
#include 'src/objects/js-objects.h'
#include 'src/objects/js-plural-rules.h'
@@ -37,8 +39,9 @@ extern class JSNumberFormat extends JSObject {
extern class JSPluralRules extends JSObject {
locale: String;
flags: Smi;
- icu_plural_rules: Foreign; // Managed<icu::PluralRules>
- icu_decimal_format: Foreign; // Managed<icu::DecimalFormat>
+ icu_plural_rules: Foreign; // Managed<icu::PluralRules>
+ icu_number_formatter:
+ Foreign; // Managed<icu::number::LocalizedNumberFormatter>
}
extern class JSRelativeTimeFormat extends JSObject {
@@ -62,3 +65,20 @@ extern class JSSegmentIterator extends JSObject {
unicode_string: Foreign; // Managed<icu::UnicodeString>
flags: Smi;
}
+
+extern class JSV8BreakIterator extends JSObject {
+ locale: String;
+ break_iterator: Foreign; // Managed<icu::BreakIterator>;
+ unicode_string: Foreign; // Managed<icu::UnicodeString>;
+ bound_adopt_text: Undefined | JSFunction;
+ bound_first: Undefined | JSFunction;
+ bound_next: Undefined | JSFunction;
+ bound_current: Undefined | JSFunction;
+ bound_break_type: Undefined | JSFunction;
+ break_iterator_type: Smi;
+}
+
+extern class JSCollator extends JSObject {
+ icu_collator: Foreign; // Managed<icu::Collator>
+ bound_compare: Undefined | JSFunction;
+}
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 061fec10f7..9151be6da4 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -91,11 +91,11 @@ void JSArrayBuffer::clear_padding() {
}
void JSArrayBuffer::set_bit_field(uint32_t bits) {
- WriteField<uint32_t>(kBitFieldOffset, bits);
+ RELAXED_WRITE_UINT32_FIELD(*this, kBitFieldOffset, bits);
}
uint32_t JSArrayBuffer::bit_field() const {
- return ReadField<uint32_t>(kBitFieldOffset);
+ return RELAXED_READ_UINT32_FIELD(*this, kBitFieldOffset);
}
// |bit_field| fields.
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index b22001f04a..7bf2e1ae94 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -243,6 +243,12 @@ class JSTypedArray : public JSArrayBufferView {
class BodyDescriptor;
+#ifdef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
+ static constexpr size_t kMaxSizeInHeap = V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP;
+#else
+ static constexpr size_t kMaxSizeInHeap = 64;
+#endif
+
private:
static Handle<JSArrayBuffer> MaterializeArrayBuffer(
Handle<JSTypedArray> typed_array);
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 335fabba86..1ff7dcb123 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -61,13 +61,14 @@ bool JSArray::HasArrayPrototype(Isolate* isolate) {
ACCESSORS(JSArrayIterator, iterated_object, Object, kIteratedObjectOffset)
ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset)
+SMI_ACCESSORS(JSArrayIterator, raw_kind, kKindOffset)
+
IterationKind JSArrayIterator::kind() const {
- return static_cast<IterationKind>(
- Smi::cast(READ_FIELD(*this, kKindOffset)).value());
+ return static_cast<IterationKind>(raw_kind());
}
void JSArrayIterator::set_kind(IterationKind kind) {
- WRITE_FIELD(*this, kKindOffset, Smi::FromInt(static_cast<int>(kind)));
+ set_raw_kind(static_cast<int>(kind));
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 4bc296e31e..eb581c104e 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -132,7 +132,8 @@ class JSArray : public JSObject {
OBJECT_CONSTRUCTORS(JSArray, JSObject);
};
-Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
+Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
+ Handle<Context> native_context,
Handle<Map> initial_map);
// The JSArrayIterator describes JavaScript Array Iterators Objects, as
@@ -179,6 +180,9 @@ class JSArrayIterator : public JSObject {
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JSARRAY_ITERATOR_FIELDS)
+ private:
+ DECL_INT_ACCESSORS(raw_kind)
+
OBJECT_CONSTRUCTORS(JSArrayIterator, JSObject);
};
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
index 177d9d352b..86e87ddb0d 100644
--- a/deps/v8/src/objects/js-break-iterator-inl.h
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -20,14 +20,12 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSV8BreakIterator, JSObject)
-inline void JSV8BreakIterator::set_type(Type type) {
- DCHECK_GT(JSV8BreakIterator::Type::COUNT, type);
- WRITE_FIELD(*this, kTypeOffset, Smi::FromInt(static_cast<int>(type)));
+inline JSV8BreakIterator::Type JSV8BreakIterator::type() const {
+ return static_cast<JSV8BreakIterator::Type>(raw_type());
}
-inline JSV8BreakIterator::Type JSV8BreakIterator::type() const {
- Object value = READ_FIELD(*this, kTypeOffset);
- return static_cast<JSV8BreakIterator::Type>(Smi::ToInt(value));
+inline void JSV8BreakIterator::set_type(Type type) {
+ set_raw_type(static_cast<int>(type));
}
ACCESSORS(JSV8BreakIterator, locale, String, kLocaleOffset)
@@ -41,6 +39,8 @@ ACCESSORS(JSV8BreakIterator, bound_next, Object, kBoundNextOffset)
ACCESSORS(JSV8BreakIterator, bound_current, Object, kBoundCurrentOffset)
ACCESSORS(JSV8BreakIterator, bound_break_type, Object, kBoundBreakTypeOffset)
+SMI_ACCESSORS(JSV8BreakIterator, raw_type, kBreakIteratorTypeOffset)
+
CAST_ACCESSOR(JSV8BreakIterator)
} // namespace internal
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
index 4879fb41a4..31ed3f8611 100644
--- a/deps/v8/src/objects/js-break-iterator.cc
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -15,9 +15,9 @@
namespace v8 {
namespace internal {
-MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::Initialize(
- Isolate* isolate, Handle<JSV8BreakIterator> break_iterator_holder,
- Handle<Object> locales, Handle<Object> options_obj) {
+MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options_obj) {
Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
@@ -96,8 +96,13 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::Initialize(
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- break_iterator_holder->set_locale(*locale_str);
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSV8BreakIterator> break_iterator_holder =
+ Handle<JSV8BreakIterator>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ break_iterator_holder->set_locale(*locale_str);
break_iterator_holder->set_type(type_enum);
break_iterator_holder->set_break_iterator(*managed_break_iterator);
break_iterator_holder->set_unicode_string(*managed_unicode_string);
@@ -126,9 +131,9 @@ void JSV8BreakIterator::AdoptText(
icu::BreakIterator* break_iterator =
break_iterator_holder->break_iterator().raw();
CHECK_NOT_NULL(break_iterator);
- Managed<icu::UnicodeString> unicode_string =
+ Handle<Managed<icu::UnicodeString>> unicode_string =
Intl::SetTextToBreakIterator(isolate, text, break_iterator);
- break_iterator_holder->set_unicode_string(unicode_string);
+ break_iterator_holder->set_unicode_string(*unicode_string);
}
Handle<String> JSV8BreakIterator::TypeAsString() const {
@@ -141,9 +146,8 @@ Handle<String> JSV8BreakIterator::TypeAsString() const {
return GetReadOnlyRoots().sentence_string_handle();
case Type::LINE:
return GetReadOnlyRoots().line_string_handle();
- case Type::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
Handle<Object> JSV8BreakIterator::Current(
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index fe94c177c4..4b40192c81 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -15,6 +15,7 @@
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
#include "src/objects/objects.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,9 +29,9 @@ namespace internal {
class JSV8BreakIterator : public JSObject {
public:
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSV8BreakIterator> Initialize(
- Isolate* isolate, Handle<JSV8BreakIterator> break_iterator,
- Handle<Object> input_locales, Handle<Object> input_options);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSV8BreakIterator> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> input_locales,
+ Handle<Object> input_options);
static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator);
@@ -50,7 +51,7 @@ class JSV8BreakIterator : public JSObject {
static String BreakType(Isolate* isolate,
Handle<JSV8BreakIterator> break_iterator);
- enum class Type { CHARACTER, WORD, SENTENCE, LINE, COUNT };
+ enum class Type { CHARACTER, WORD, SENTENCE, LINE };
inline void set_type(Type type);
inline Type type() const;
@@ -69,23 +70,12 @@ class JSV8BreakIterator : public JSObject {
DECL_ACCESSORS(bound_current, Object)
DECL_ACCESSORS(bound_break_type, Object)
-// Layout description.
-#define BREAK_ITERATOR_FIELDS(V) \
- /* Pointer fields. */ \
- V(kLocaleOffset, kTaggedSize) \
- V(kTypeOffset, kTaggedSize) \
- V(kBreakIteratorOffset, kTaggedSize) \
- V(kUnicodeStringOffset, kTaggedSize) \
- V(kBoundAdoptTextOffset, kTaggedSize) \
- V(kBoundFirstOffset, kTaggedSize) \
- V(kBoundNextOffset, kTaggedSize) \
- V(kBoundCurrentOffset, kTaggedSize) \
- V(kBoundBreakTypeOffset, kTaggedSize) \
- /* Total Size */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, BREAK_ITERATOR_FIELDS)
-#undef BREAK_ITERATOR_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSV8BREAK_ITERATOR_FIELDS)
+
+ private:
+ DECL_INT_ACCESSORS(raw_type)
OBJECT_CONSTRUCTORS(JSV8BreakIterator, JSObject);
};
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
index e82351993d..a8d3893316 100644
--- a/deps/v8/src/objects/js-collator-inl.h
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -20,7 +20,7 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSCollator, JSObject)
-ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kICUCollatorOffset)
+ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kIcuCollatorOffset)
ACCESSORS(JSCollator, bound_compare, Object, kBoundCompareOffset)
CAST_ACCESSOR(JSCollator)
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index 4a1e857403..0413e2acd1 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -15,7 +15,9 @@
#include "unicode/locid.h"
#include "unicode/strenum.h"
#include "unicode/ucol.h"
+#include "unicode/udata.h"
#include "unicode/uloc.h"
+#include "unicode/utypes.h"
namespace v8 {
namespace internal {
@@ -239,10 +241,9 @@ void SetCaseFirstOption(icu::Collator* icu_collator,
} // anonymous namespace
// static
-MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
- Handle<JSCollator> collator,
- Handle<Object> locales,
- Handle<Object> options_obj) {
+MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> options_obj) {
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -465,15 +466,31 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
Handle<Managed<icu::Collator>> managed_collator =
Managed<icu::Collator>::FromUniquePtr(isolate, 0,
std::move(icu_collator));
+
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSCollator> collator = Handle<JSCollator>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
collator->set_icu_collator(*managed_collator);
// 29. Return collator.
return collator;
}
+namespace {
+
+struct CheckColl {
+ static const char* key() { return nullptr; }
+#define U_ICUDATA_COLL U_ICUDATA_NAME U_TREE_SEPARATOR_STRING "coll"
+ static const char* path() { return U_ICUDATA_COLL; }
+#undef U_ICUDATA_COLL
+};
+
+} // namespace
+
const std::set<std::string>& JSCollator::GetAvailableLocales() {
- static base::LazyInstance<Intl::AvailableLocales<icu::Collator>>::type
- available_locales = LAZY_INSTANCE_INITIALIZER;
+ static base::LazyInstance<Intl::AvailableLocales<icu::Collator, CheckColl>>::
+ type available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index 2bedbf811a..e9114afeb1 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -32,8 +32,8 @@ namespace internal {
class JSCollator : public JSObject {
public:
// ecma402/#sec-initializecollator
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSCollator> Initialize(
- Isolate* isolate, Handle<JSCollator> collator, Handle<Object> locales,
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSCollator> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
Handle<Object> options);
// ecma402/#sec-intl.collator.prototype.resolvedoptions
@@ -47,14 +47,8 @@ class JSCollator : public JSObject {
DECL_VERIFIER(JSCollator)
// Layout description.
-#define JS_COLLATOR_FIELDS(V) \
- V(kICUCollatorOffset, kTaggedSize) \
- V(kBoundCompareOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_COLLATOR_FIELDS)
-#undef JS_COLLATOR_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSCOLLATOR_FIELDS)
DECL_ACCESSORS(icu_collator, Managed<icu::Collator>)
DECL_ACCESSORS(bound_compare, Object)
diff --git a/deps/v8/src/objects/js-collection-iterator.h b/deps/v8/src/objects/js-collection-iterator.h
index 4952f04a72..c002294b01 100644
--- a/deps/v8/src/objects/js-collection-iterator.h
+++ b/deps/v8/src/objects/js-collection-iterator.h
@@ -25,6 +25,7 @@ class JSCollectionIterator : public JSObject {
DECL_ACCESSORS(index, Object)
void JSCollectionIteratorPrint(std::ostream& os, const char* name);
+ DECL_VERIFIER(JSCollectionIterator)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JSCOLLECTION_ITERATOR_FIELDS)
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index 6dfde352ca..0a856ca062 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -30,6 +30,8 @@ class JSCollection : public JSObject {
static const int kAddFunctionDescriptorIndex = 3;
+ DECL_VERIFIER(JSCollection)
+
OBJECT_CONSTRUCTORS(JSCollection, JSObject);
};
@@ -114,6 +116,8 @@ class JSWeakCollection : public JSObject {
static Handle<JSArray> GetEntries(Handle<JSWeakCollection> holder,
int max_entries);
+ DECL_VERIFIER(JSWeakCollection)
+
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JSWEAK_COLLECTION_FIELDS)
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 8730e0a39b..db7ba27312 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -56,13 +56,13 @@ class PatternItem {
std::vector<const char*> allowed_values;
};
-static const std::vector<PatternItem> BuildPatternItems() {
+static std::vector<PatternItem> BuildPatternItems() {
const std::vector<const char*> kLongShort = {"long", "short"};
const std::vector<const char*> kNarrowLongShort = {"narrow", "long", "short"};
const std::vector<const char*> k2DigitNumeric = {"2-digit", "numeric"};
const std::vector<const char*> kNarrowLongShort2DigitNumeric = {
"narrow", "long", "short", "2-digit", "numeric"};
- const std::vector<PatternItem> kPatternItems = {
+ std::vector<PatternItem> items = {
PatternItem("weekday",
{{"EEEEE", "narrow"},
{"EEEE", "long"},
@@ -75,38 +75,59 @@ static const std::vector<PatternItem> BuildPatternItems() {
{{"GGGGG", "narrow"}, {"GGGG", "long"}, {"GGG", "short"}},
kNarrowLongShort),
PatternItem("year", {{"yy", "2-digit"}, {"y", "numeric"}},
- k2DigitNumeric),
- // Sometimes we get L instead of M for month - standalone name.
- PatternItem("month",
- {{"MMMMM", "narrow"},
- {"MMMM", "long"},
- {"MMM", "short"},
- {"MM", "2-digit"},
- {"M", "numeric"},
- {"LLLLL", "narrow"},
- {"LLLL", "long"},
- {"LLL", "short"},
- {"LL", "2-digit"},
- {"L", "numeric"}},
- kNarrowLongShort2DigitNumeric),
- PatternItem("day", {{"dd", "2-digit"}, {"d", "numeric"}}, k2DigitNumeric),
- PatternItem("hour",
- {{"HH", "2-digit"},
- {"H", "numeric"},
- {"hh", "2-digit"},
- {"h", "numeric"},
- {"kk", "2-digit"},
- {"k", "numeric"},
- {"KK", "2-digit"},
- {"K", "numeric"}},
- k2DigitNumeric),
- PatternItem("minute", {{"mm", "2-digit"}, {"m", "numeric"}},
- k2DigitNumeric),
- PatternItem("second", {{"ss", "2-digit"}, {"s", "numeric"}},
- k2DigitNumeric),
- PatternItem("timeZoneName", {{"zzzz", "long"}, {"z", "short"}},
- kLongShort)};
- return kPatternItems;
+ k2DigitNumeric)};
+ if (FLAG_harmony_intl_dateformat_quarter) {
+ items.push_back(PatternItem("quarter",
+ {{"QQQQQ", "narrow"},
+ {"QQQQ", "long"},
+ {"QQQ", "short"},
+ {"qqqqq", "narrow"},
+ {"qqqq", "long"},
+ {"qqq", "short"}},
+ kNarrowLongShort));
+ }
+ // Sometimes we get L instead of M for month - standalone name.
+ items.push_back(PatternItem("month",
+ {{"MMMMM", "narrow"},
+ {"MMMM", "long"},
+ {"MMM", "short"},
+ {"MM", "2-digit"},
+ {"M", "numeric"},
+ {"LLLLL", "narrow"},
+ {"LLLL", "long"},
+ {"LLL", "short"},
+ {"LL", "2-digit"},
+ {"L", "numeric"}},
+ kNarrowLongShort2DigitNumeric));
+ items.push_back(PatternItem("day", {{"dd", "2-digit"}, {"d", "numeric"}},
+ k2DigitNumeric));
+ if (FLAG_harmony_intl_dateformat_day_period) {
+ items.push_back(PatternItem("dayPeriod",
+ {{"BBBBB", "narrow"},
+ {"bbbbb", "narrow"},
+ {"BBBB", "long"},
+ {"bbbb", "long"},
+ {"B", "short"},
+ {"b", "short"}},
+ kNarrowLongShort));
+ }
+ items.push_back(PatternItem("hour",
+ {{"HH", "2-digit"},
+ {"H", "numeric"},
+ {"hh", "2-digit"},
+ {"h", "numeric"},
+ {"kk", "2-digit"},
+ {"k", "numeric"},
+ {"KK", "2-digit"},
+ {"K", "numeric"}},
+ k2DigitNumeric));
+ items.push_back(PatternItem("minute", {{"mm", "2-digit"}, {"m", "numeric"}},
+ k2DigitNumeric));
+ items.push_back(PatternItem("second", {{"ss", "2-digit"}, {"s", "numeric"}},
+ k2DigitNumeric));
+ items.push_back(PatternItem("timeZoneName",
+ {{"zzzz", "long"}, {"z", "short"}}, kLongShort));
+ return items;
}
class PatternItems {
@@ -348,6 +369,16 @@ Handle<String> DateTimeStyleAsString(Isolate* isolate,
}
}
+int FractionalSecondDigitsFromPattern(const std::string& pattern) {
+ int result = 0;
+ for (size_t i = 0; i < pattern.length() && result < 3; i++) {
+ if (pattern[i] == 'S') {
+ result++;
+ }
+ }
+ return result;
+}
+
} // namespace
// ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions
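The helper above simply counts 'S' letters in the resolved ICU pattern and caps the result at 3, which is what resolvedOptions later reports as fractionalSecondDigits. A standalone restatement with a few example inputs (function name is illustrative):

#include <cassert>
#include <string>

int CountFractionalSecondDigits(const std::string& pattern) {
  int result = 0;
  for (size_t i = 0; i < pattern.length() && result < 3; i++) {
    if (pattern[i] == 'S') result++;
  }
  return result;
}

int main() {
  assert(CountFractionalSecondDigits("HH:mm:ss") == 0);
  assert(CountFractionalSecondDigits("HH:mm:ss.SS") == 2);
  assert(CountFractionalSecondDigits("HH:mm:ss.SSSS") == 3);  // capped at 3
  return 0;
}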
@@ -532,6 +563,13 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
Just(kDontThrow))
.FromJust());
}
+ if (FLAG_harmony_intl_dateformat_fractional_second_digits) {
+ int fsd = FractionalSecondDigitsFromPattern(pattern);
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->fractionalSecondDigits_string(),
+ factory->NewNumberFromInt(fsd), Just(kDontThrow))
+ .FromJust());
+ }
return options;
}
@@ -643,17 +681,14 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
JSFunction::cast(
isolate->context().native_context().intl_date_time_format_function()),
isolate);
- Handle<JSObject> obj;
+ Handle<Map> map;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, obj,
- JSObject::New(constructor, constructor, Handle<AllocationSite>::null()),
- String);
+ isolate, map,
+ JSFunction::GetDerivedMap(isolate, constructor, constructor), String);
Handle<JSDateTimeFormat> date_time_format;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, date_time_format,
- JSDateTimeFormat::Initialize(isolate, Handle<JSDateTimeFormat>::cast(obj),
- locales, internal_options),
- String);
+ JSDateTimeFormat::New(isolate, map, locales, internal_options), String);
if (can_cache) {
isolate->set_icu_object_in_cache(
@@ -669,27 +704,23 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
namespace {
Maybe<bool> IsPropertyUndefined(Isolate* isolate, Handle<JSObject> options,
- const char* property) {
- Factory* factory = isolate->factory();
+ Handle<String> property) {
// i. Let prop be the property name.
// ii. Let value be ? Get(options, prop).
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value,
- Object::GetPropertyOrElement(
- isolate, options, factory->NewStringFromAsciiChecked(property)),
+ isolate, value, Object::GetPropertyOrElement(isolate, options, property),
Nothing<bool>());
return Just(value->IsUndefined(isolate));
}
Maybe<bool> NeedsDefault(Isolate* isolate, Handle<JSObject> options,
- const std::vector<std::string>& props) {
+ const std::vector<Handle<String>>& props) {
bool needs_default = true;
for (const auto& prop : props) {
// i. Let prop be the property name.
// ii. Let value be ? Get(options, prop)
- Maybe<bool> maybe_undefined =
- IsPropertyUndefined(isolate, options, prop.c_str());
+ Maybe<bool> maybe_undefined = IsPropertyUndefined(isolate, options, prop);
MAYBE_RETURN(maybe_undefined, Nothing<bool>());
// iii. If value is not undefined, let needDefaults be false.
if (!maybe_undefined.FromJust()) {
@@ -741,8 +772,15 @@ MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions(
// 4. If required is "date" or "any", then
if (required == RequiredOption::kAny || required == RequiredOption::kDate) {
- // a. For each of the property names "weekday", "year", "month", "day", do
- const std::vector<std::string> list({"weekday", "year", "month", "day"});
+ // a. For each of the property names "weekday", "year", "quarter", "month",
+ // "day", do
+ std::vector<Handle<String>> list(
+ {factory->weekday_string(), factory->year_string()});
+ if (FLAG_harmony_intl_dateformat_quarter) {
+ list.push_back(factory->quarter_string());
+ }
+ list.push_back(factory->month_string());
+ list.push_back(factory->day_string());
Maybe<bool> maybe_needs_default = NeedsDefault(isolate, options, list);
MAYBE_RETURN(maybe_needs_default, Handle<JSObject>());
needs_default = maybe_needs_default.FromJust();
@@ -750,8 +788,18 @@ MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions(
// 5. If required is "time" or "any", then
if (required == RequiredOption::kAny || required == RequiredOption::kTime) {
- // a. For each of the property names "hour", "minute", "second", do
- const std::vector<std::string> list({"hour", "minute", "second"});
+ // a. For each of the property names "dayPeriod", "hour", "minute",
+ // "second", "fractionalSecondDigits", do
+ std::vector<Handle<String>> list;
+ if (FLAG_harmony_intl_dateformat_day_period) {
+ list.push_back(factory->dayPeriod_string());
+ }
+ list.push_back(factory->hour_string());
+ list.push_back(factory->minute_string());
+ list.push_back(factory->second_string());
+ if (FLAG_harmony_intl_dateformat_fractional_second_digits) {
+ list.push_back(factory->fractionalSecondDigits_string());
+ }
Maybe<bool> maybe_needs_default = NeedsDefault(isolate, options, list);
MAYBE_RETURN(maybe_needs_default, Handle<JSObject>());
needs_default &= maybe_needs_default.FromJust();
@@ -890,7 +938,7 @@ icu::Calendar* CreateCalendar(Isolate* isolate, const icu::Locale& icu_locale,
std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator& generator) {
+ icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
// See https://github.com/tc39/ecma402/issues/225 . The best pattern
// generation needs to be done in the base locale according to the
// current spec however odd it may be. See also crbug.com/826549 .
@@ -920,9 +968,9 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
class DateFormatCache {
public:
- icu::SimpleDateFormat* Create(const icu::Locale& icu_locale,
- const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator& generator) {
+ icu::SimpleDateFormat* Create(
+ const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
+ icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
std::string key;
skeleton.toUTF8String<std::string>(key);
key += ":";
@@ -951,7 +999,7 @@ class DateFormatCache {
std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormatFromCache(
const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator& generator) {
+ icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
static base::LazyInstance<DateFormatCache>::type cache =
LAZY_INSTANCE_INITIALIZER;
return std::unique_ptr<icu::SimpleDateFormat>(
@@ -1087,7 +1135,8 @@ icu::UnicodeString ReplaceSkeleton(const icu::UnicodeString input,
std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
JSDateTimeFormat::DateTimeStyle date_style,
JSDateTimeFormat::DateTimeStyle time_style, const icu::Locale& icu_locale,
- Intl::HourCycle hc, icu::DateTimePatternGenerator& generator) {
+ Intl::HourCycle hc,
+ icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
std::unique_ptr<icu::SimpleDateFormat> result;
if (date_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
if (time_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
@@ -1156,10 +1205,9 @@ class DateTimePatternGeneratorCache {
enum FormatMatcherOption { kBestFit, kBasic };
// ecma402/#sec-initializedatetimeformat
-MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
- Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
- Handle<Object> locales, Handle<Object> input_options) {
- date_time_format->set_flags(0);
+MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> input_options) {
Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
@@ -1347,7 +1395,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
}
}
}
- date_time_format->set_hour_cycle(hc);
DateTimeStyle date_style = DateTimeStyle::kUndefined;
DateTimeStyle time_style = DateTimeStyle::kUndefined;
@@ -1367,9 +1414,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
// 29. If dateStyle is not undefined, set dateTimeFormat.[[DateStyle]] to
// dateStyle.
date_style = maybe_date_style.FromJust();
- if (date_style != DateTimeStyle::kUndefined) {
- date_time_format->set_date_style(date_style);
- }
// 30. Let timeStyle be ? GetOption(options, "timeStyle", "string", «
// "full", "long", "medium", "short" »).
@@ -1385,9 +1429,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
// 31. If timeStyle is not undefined, set dateTimeFormat.[[TimeStyle]] to
// timeStyle.
time_style = maybe_time_style.FromJust();
- if (time_style != DateTimeStyle::kUndefined) {
- date_time_format->set_time_style(time_style);
- }
// 32. If dateStyle or timeStyle are not undefined, then
if (date_style != DateTimeStyle::kUndefined ||
@@ -1419,6 +1460,16 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
skeleton += item.map.find(input.get())->second;
}
}
+ if (FLAG_harmony_intl_dateformat_fractional_second_digits) {
+ Maybe<int> maybe_fsd = Intl::GetNumberOption(
+ isolate, options, factory->fractionalSecondDigits_string(), 0, 3, 0);
+ MAYBE_RETURN(maybe_fsd, MaybeHandle<JSDateTimeFormat>());
+ // Convert fractionalSecondDigits to skeleton.
+ int fsd = maybe_fsd.FromJust();
+ for (int i = 0; i < fsd; i++) {
+ skeleton += "S";
+ }
+ }
enum FormatMatcherOption { kBestFit, kBasic };
// We implement only best fit algorithm, but still need to check
@@ -1451,7 +1502,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
// g. If dateTimeFormat.[[Hour]] is not undefined, then
if (!has_hour_option) {
// h. Else, i. Set dateTimeFormat.[[HourCycle]] to undefined.
- date_time_format->set_hour_cycle(Intl::HourCycle::kUndefined);
+ hc = Intl::HourCycle::kUndefined;
}
}
@@ -1477,8 +1528,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
maybe_hour_cycle.FromJust() != Intl::HourCycle::kUndefined) {
auto hc_extension_it = r.extensions.find("hc");
if (hc_extension_it != r.extensions.end()) {
- if (date_time_format->hour_cycle() !=
- Intl::ToHourCycle(hc_extension_it->second.c_str())) {
+ if (hc != Intl::ToHourCycle(hc_extension_it->second.c_str())) {
// Remove -hc- if it does not agree with what we used.
UErrorCode status = U_ZERO_ERROR;
icu_locale.setUnicodeKeywordValue("hc", nullptr, status);
@@ -1490,16 +1540,28 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
Handle<Managed<icu::Locale>> managed_locale =
Managed<icu::Locale>::FromRawPtr(isolate, 0, icu_locale.clone());
- date_time_format->set_icu_locale(*managed_locale);
Handle<Managed<icu::SimpleDateFormat>> managed_format =
Managed<icu::SimpleDateFormat>::FromUniquePtr(isolate, 0,
std::move(icu_date_format));
- date_time_format->set_icu_simple_date_format(*managed_format);
Handle<Managed<icu::DateIntervalFormat>> managed_interval_format =
Managed<icu::DateIntervalFormat>::FromRawPtr(isolate, 0, nullptr);
- date_time_format->set_icu_date_interval_format(*managed_interval_format);
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSDateTimeFormat> date_time_format = Handle<JSDateTimeFormat>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ date_time_format->set_flags(0);
+ date_time_format->set_hour_cycle(hc);
+ if (date_style != DateTimeStyle::kUndefined) {
+ date_time_format->set_date_style(date_style);
+ }
+ if (time_style != DateTimeStyle::kUndefined) {
+ date_time_format->set_time_style(time_style);
+ }
+ date_time_format->set_icu_locale(*managed_locale);
+ date_time_format->set_icu_simple_date_format(*managed_format);
+ date_time_format->set_icu_date_interval_format(*managed_interval_format);
return date_time_format;
}
@@ -1516,6 +1578,9 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
case UDAT_EXTENDED_YEAR_FIELD:
case UDAT_YEAR_NAME_FIELD:
return isolate->factory()->year_string();
+ case UDAT_QUARTER_FIELD:
+ case UDAT_STANDALONE_QUARTER_FIELD:
+ return isolate->factory()->quarter_string();
case UDAT_MONTH_FIELD:
case UDAT_STANDALONE_MONTH_FIELD:
return isolate->factory()->month_string();
@@ -1535,6 +1600,8 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
case UDAT_STANDALONE_DAY_FIELD:
return isolate->factory()->weekday_string();
case UDAT_AM_PM_FIELD:
+ case UDAT_AM_PM_MIDNIGHT_NOON_FIELD:
+ case UDAT_FLEXIBLE_DAY_PERIOD_FIELD:
return isolate->factory()->dayPeriod_string();
case UDAT_TIMEZONE_FIELD:
case UDAT_TIMEZONE_RFC_FIELD:
@@ -1546,6 +1613,8 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
return isolate->factory()->timeZoneName_string();
case UDAT_ERA_FIELD:
return isolate->factory()->era_string();
+ case UDAT_FRACTIONAL_SECOND_FIELD:
+ return isolate->factory()->fractionalSecond_string();
default:
// Other UDAT_*_FIELD's cannot show up because there is no way to specify
// them via options of Intl.DateTimeFormat.
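Taken together, the new pattern items and the fractionalSecondDigits option extend the skeleton handed to ICU; for example, a short "quarter" maps to the "QQQ" pattern letters in the table above. A tiny illustration of that value-to-pattern direction, using a hypothetical helper over a subset of the real table:

#include <map>
#include <string>

std::string QuarterSkeletonPiece(const std::string& value) {
  static const std::map<std::string, std::string> kQuarter = {
      {"narrow", "QQQQQ"}, {"long", "QQQQ"}, {"short", "QQQ"}};
  auto it = kQuarter.find(value);
  return it == kQuarter.end() ? std::string() : it->second;
}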
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index 664ccdcdf7..f4a8ccc8f5 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -32,9 +32,9 @@ namespace internal {
class JSDateTimeFormat : public JSObject {
public:
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSDateTimeFormat> Initialize(
- Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
- Handle<Object> locales, Handle<Object> options);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSDateTimeFormat> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options);
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format);
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
index 96e61c2205..6a1529ad33 100644
--- a/deps/v8/src/objects/js-list-format-inl.h
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -27,7 +27,7 @@ ACCESSORS(JSListFormat, icu_formatter, Managed<icu::ListFormatter>,
SMI_ACCESSORS(JSListFormat, flags, kFlagsOffset)
inline void JSListFormat::set_style(Style style) {
- DCHECK_GT(Style::COUNT, style);
+ DCHECK_GE(StyleBits::kMax, style);
int hints = flags();
hints = StyleBits::update(hints, style);
set_flags(hints);
@@ -38,7 +38,7 @@ inline JSListFormat::Style JSListFormat::style() const {
}
inline void JSListFormat::set_type(Type type) {
- DCHECK_GT(Type::COUNT, type);
+ DCHECK_GE(TypeBits::kMax, type);
int hints = flags();
hints = TypeBits::update(hints, type);
set_flags(hints);
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index 84691194ec..4f303b1874 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -50,8 +50,6 @@ const char* GetIcuStyleString(JSListFormat::Style style,
return kStandardShort;
case JSListFormat::Style::NARROW:
return kStandardNarrow;
- case JSListFormat::Style::COUNT:
- UNREACHABLE();
}
case JSListFormat::Type::DISJUNCTION:
switch (style) {
@@ -61,8 +59,6 @@ const char* GetIcuStyleString(JSListFormat::Style style,
return kOrShort;
case JSListFormat::Style::NARROW:
return kOrNarrow;
- case JSListFormat::Style::COUNT:
- UNREACHABLE();
}
case JSListFormat::Type::UNIT:
switch (style) {
@@ -72,12 +68,9 @@ const char* GetIcuStyleString(JSListFormat::Style style,
return kUnitShort;
case JSListFormat::Style::NARROW:
return kUnitNarrow;
- case JSListFormat::Style::COUNT:
- UNREACHABLE();
}
- case JSListFormat::Type::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
} // namespace
@@ -114,11 +107,9 @@ JSListFormat::Type get_type(const char* str) {
UNREACHABLE();
}
-MaybeHandle<JSListFormat> JSListFormat::Initialize(
- Isolate* isolate, Handle<JSListFormat> list_format, Handle<Object> locales,
- Handle<Object> input_options) {
- list_format->set_flags(0);
-
+MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> input_options) {
Handle<JSReceiver> options;
// 3. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
@@ -156,11 +147,8 @@ MaybeHandle<JSListFormat> JSListFormat::Initialize(
Intl::ResolvedLocale r =
Intl::ResolveLocale(isolate, JSListFormat::GetAvailableLocales(),
requested_locales, matcher, {});
-
- // 11. Set listFormat.[[Locale]] to r.[[Locale]].
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- list_format->set_locale(*locale_str);
// 12. Let t be GetOption(options, "type", "string", «"conjunction",
// "disjunction", "unit"», "conjunction").
@@ -171,9 +159,6 @@ MaybeHandle<JSListFormat> JSListFormat::Initialize(
MAYBE_RETURN(maybe_type, MaybeHandle<JSListFormat>());
Type type_enum = maybe_type.FromJust();
- // 13. Set listFormat.[[Type]] to t.
- list_format->set_type(type_enum);
-
// 14. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
@@ -182,9 +167,6 @@ MaybeHandle<JSListFormat> JSListFormat::Initialize(
MAYBE_RETURN(maybe_style, MaybeHandle<JSListFormat>());
Style style_enum = maybe_style.FromJust();
- // 15. Set listFormat.[[Style]] to s.
- list_format->set_style(style_enum);
-
icu::Locale icu_locale = r.icu_locale;
UErrorCode status = U_ZERO_ERROR;
icu::ListFormatter* formatter = icu::ListFormatter::createInstance(
@@ -198,7 +180,22 @@ MaybeHandle<JSListFormat> JSListFormat::Initialize(
Handle<Managed<icu::ListFormatter>> managed_formatter =
Managed<icu::ListFormatter>::FromRawPtr(isolate, 0, formatter);
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSListFormat> list_format = Handle<JSListFormat>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ list_format->set_flags(0);
list_format->set_icu_formatter(*managed_formatter);
+
+ // 11. Set listFormat.[[Locale]] to r.[[Locale]].
+ list_format->set_locale(*locale_str);
+
+ // 13. Set listFormat.[[Type]] to t.
+ list_format->set_type(type_enum);
+
+ // 15. Set listFormat.[[Style]] to s.
+ list_format->set_style(style_enum);
+
return list_format;
}
@@ -234,9 +231,8 @@ Handle<String> JSListFormat::StyleAsString() const {
return GetReadOnlyRoots().short_string_handle();
case Style::NARROW:
return GetReadOnlyRoots().narrow_string_handle();
- case Style::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
Handle<String> JSListFormat::TypeAsString() const {
@@ -247,9 +243,8 @@ Handle<String> JSListFormat::TypeAsString() const {
return GetReadOnlyRoots().disjunction_string_handle();
case Type::UNIT:
return GetReadOnlyRoots().unit_string_handle();
- case Type::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
namespace {
@@ -375,11 +370,20 @@ MaybeHandle<JSArray> JSListFormat::FormatListToParts(
FormattedListToJSArray);
}
+namespace {
+
+struct CheckListPattern {
+ static const char* key() { return "listPattern"; }
+ static const char* path() { return nullptr; }
+};
+
+} // namespace
+
const std::set<std::string>& JSListFormat::GetAvailableLocales() {
- // Since ListFormatter does not have a method to list all supported
- // locales, use the one in icu::Locale per comments in
- // ICU FR at https://unicode-org.atlassian.net/browse/ICU-20015
- return Intl::GetAvailableLocalesForLocale();
+ static base::LazyInstance<
+ Intl::AvailableLocales<icu::Locale, CheckListPattern>>::type
+ available_locales = LAZY_INSTANCE_INITIALIZER;
+ return available_locales.Pointer()->Get();
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index 0284d05d42..df937722e6 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -30,11 +30,11 @@ namespace internal {
class JSListFormat : public JSObject {
public:
- // Initializes relative time format object with properties derived from input
+ // Creates list format object with properties derived from input
// locales and options.
- static MaybeHandle<JSListFormat> Initialize(
- Isolate* isolate, Handle<JSListFormat> list_format_holder,
- Handle<Object> locales, Handle<Object> options);
+ static MaybeHandle<JSListFormat> New(Isolate* isolate, Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> options);
static Handle<JSObject> ResolvedOptions(Isolate* isolate,
Handle<JSListFormat> format_holder);
@@ -64,10 +64,9 @@ class JSListFormat : public JSObject {
//
// ecma402/#sec-properties-of-intl-listformat-instances
enum class Style {
- LONG, // Everything spelled out.
- SHORT, // Abbreviations used when possible.
- NARROW, // Use the shortest possible form.
- COUNT
+ LONG, // Everything spelled out.
+ SHORT, // Abbreviations used when possible.
+ NARROW // Use the shortest possible form.
};
inline void set_style(Style style);
inline Style style() const;
@@ -78,8 +77,7 @@ class JSListFormat : public JSObject {
enum class Type {
CONJUNCTION, // for "and"-based lists (e.g., "A, B and C")
DISJUNCTION, // for "or"-based lists (e.g., "A, B or C"),
- UNIT, // for lists of values with units (e.g., "5 pounds, 12 ounces").
- COUNT
+ UNIT // for lists of values with units (e.g., "5 pounds, 12 ounces").
};
inline void set_type(Type type);
inline Type type() const;
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 509f9a3069..4a66ea9eca 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -313,10 +313,9 @@ Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
} // namespace
-MaybeHandle<JSLocale> JSLocale::Initialize(Isolate* isolate,
- Handle<JSLocale> locale,
- Handle<String> locale_str,
- Handle<JSReceiver> options) {
+MaybeHandle<JSLocale> JSLocale::New(Isolate* isolate, Handle<Map> map,
+ Handle<String> locale_str,
+ Handle<JSReceiver> options) {
icu::LocaleBuilder builder;
Maybe<bool> maybe_apply =
ApplyOptionsToTag(isolate, locale_str, options, &builder);
@@ -341,8 +340,12 @@ MaybeHandle<JSLocale> JSLocale::Initialize(Isolate* isolate,
// 31. Set locale.[[Locale]] to r.[[locale]].
Handle<Managed<icu::Locale>> managed_locale =
Managed<icu::Locale>::FromRawPtr(isolate, 0, icu_locale.clone());
- locale->set_icu_locale(*managed_locale);
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSLocale> locale = Handle<JSLocale>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ locale->set_icu_locale(*managed_locale);
return locale;
}
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index 1a833e0e18..e1806e6b7f 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -27,12 +27,11 @@ namespace internal {
class JSLocale : public JSObject {
public:
- // Initializes locale object with properties derived from input locale string
+ // Creates locale object with properties derived from input locale string
// and options.
- static MaybeHandle<JSLocale> Initialize(Isolate* isolate,
- Handle<JSLocale> locale_holder,
- Handle<String> locale,
- Handle<JSReceiver> options);
+ static MaybeHandle<JSLocale> New(Isolate* isolate, Handle<Map> map,
+ Handle<String> locale,
+ Handle<JSReceiver> options);
static Handle<String> Maximize(Isolate* isolate, String locale);
static Handle<String> Minimize(Isolate* isolate, String locale);
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index bd76dfe556..afdfef89f2 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -66,6 +66,17 @@ inline void JSNumberFormat::set_maximum_fraction_digits(int digits) {
set_flags(hints);
}
+inline void JSNumberFormat::set_style(Style style) {
+ DCHECK_GE(StyleBits::kMax, style);
+ int hints = flags();
+ hints = StyleBits::update(hints, style);
+ set_flags(hints);
+}
+
+inline JSNumberFormat::Style JSNumberFormat::style() const {
+ return StyleBits::decode(flags());
+}
+
CAST_ACCESSOR(JSNumberFormat)
} // namespace internal
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index 67d545e0be..d1e3ef4d0c 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -31,19 +31,9 @@ namespace internal {
namespace {
-// [[Style]] is one of the values "decimal", "percent", "currency",
-// or "unit" identifying the style of the number format.
-// Note: "unit" is added in proposal-unified-intl-numberformat
-enum class Style {
- DECIMAL,
- PERCENT,
- CURRENCY,
- UNIT,
-};
-
// [[CurrencyDisplay]] is one of the values "code", "symbol", "name",
-// or "narrow-symbol" identifying the display of the currency number format.
-// Note: "narrow-symbol" is added in proposal-unified-intl-numberformat
+// or "narrowSymbol" identifying the display of the currency number format.
+// Note: "narrowSymbol" is added in proposal-unified-intl-numberformat
enum class CurrencyDisplay {
CODE,
SYMBOL,
@@ -62,8 +52,8 @@ enum class CurrencySign {
// [[UnitDisplay]] is one of the String values "short", "narrow", or "long",
// specifying whether to display the unit as a symbol, narrow symbol, or
-// localized long name if formatting with the "unit" or "percent" style. It is
-// only used when [[Style]] has the value "unit" or "percent".
+// localized long name if formatting with the "unit" style. It is
+// only used when [[Style]] has the value "unit".
enum class UnitDisplay {
SHORT,
NARROW,
@@ -95,7 +85,7 @@ enum class CompactDisplay {
};
// [[SignDisplay]] is one of the String values "auto", "always", "never", or
-// "except-zero", specifying whether to show the sign on negative numbers
+// "exceptZero", specifying whether to show the sign on negative numbers
// only, positive and negative numbers including zero, neither positive nor
// negative numbers, or positive and negative numbers but not zero.
enum class SignDisplay {
@@ -164,7 +154,9 @@ icu::number::Notation ToICUNotation(Notation notation,
return icu::number::Notation::scientific();
case Notation::ENGINEERING:
return icu::number::Notation::engineering();
+ // 29. If notation is "compact", then
case Notation::COMPACT:
+ // 29. a. Set numberFormat.[[CompactDisplay]] to compactDisplay.
if (compact_display == CompactDisplay::SHORT) {
return icu::number::Notation::compactShort();
}
@@ -195,7 +187,9 @@ std::map<const std::string, icu::MeasureUnit> CreateUnitMap() {
CHECK(U_SUCCESS(status));
std::map<const std::string, icu::MeasureUnit> map;
for (auto it = units.begin(); it != units.end(); ++it) {
- if (sanctioned.count(it->getSubtype()) > 0) {
+ // Need to skip none/percent
+ if (sanctioned.count(it->getSubtype()) > 0 &&
+ strcmp("none", it->getType()) != 0) {
map[it->getSubtype()] = *it;
}
}
@@ -304,38 +298,16 @@ bool IsWellFormedCurrencyCode(const std::string& currency) {
return (IsAToZ(currency[0]) && IsAToZ(currency[1]) && IsAToZ(currency[2]));
}
-// Parse the 'style' from the skeleton.
-Style StyleFromSkeleton(const icu::UnicodeString& skeleton) {
- // Ex: skeleton as
- // "percent precision-integer rounding-mode-half-up scale/100"
- if (skeleton.indexOf("percent") >= 0 && skeleton.indexOf("scale/100") >= 0) {
- return Style::PERCENT;
- }
- // Ex: skeleton as "currency/TWD .00 rounding-mode-half-up"
- if (skeleton.indexOf("currency") >= 0) {
- return Style::CURRENCY;
- }
- // Ex: skeleton as
- // "measure-unit/length-meter .### rounding-mode-half-up unit-width-narrow"
- // or special case for "percent .### rounding-mode-half-up"
- if (skeleton.indexOf("measure-unit") >= 0 ||
- skeleton.indexOf("percent") >= 0) {
- return Style::UNIT;
- }
- // Ex: skeleton as ".### rounding-mode-half-up"
- return Style::DECIMAL;
-}
-
// Return the style as a String.
-Handle<String> StyleAsString(Isolate* isolate, Style style) {
+Handle<String> StyleAsString(Isolate* isolate, JSNumberFormat::Style style) {
switch (style) {
- case Style::PERCENT:
+ case JSNumberFormat::Style::PERCENT:
return ReadOnlyRoots(isolate).percent_string_handle();
- case Style::CURRENCY:
+ case JSNumberFormat::Style::CURRENCY:
return ReadOnlyRoots(isolate).currency_string_handle();
- case Style::UNIT:
+ case JSNumberFormat::Style::UNIT:
return ReadOnlyRoots(isolate).unit_string_handle();
- case Style::DECIMAL:
+ case JSNumberFormat::Style::DECIMAL:
return ReadOnlyRoots(isolate).decimal_string_handle();
}
UNREACHABLE();
@@ -357,7 +329,7 @@ Handle<String> CurrencyDisplayString(Isolate* isolate,
// Ex: skeleton as
// "currency/TWD .00 rounding-mode-half-up unit-width-narrow;
if (skeleton.indexOf("unit-width-narrow") >= 0) {
- return ReadOnlyRoots(isolate).narrow_symbol_string_handle();
+ return ReadOnlyRoots(isolate).narrowSymbol_string_handle();
}
// Ex: skeleton as "currency/TWD .00 rounding-mode-half-up"
return ReadOnlyRoots(isolate).symbol_string_handle();
@@ -480,11 +452,13 @@ Handle<String> SignDisplayString(Isolate* isolate,
// "currency/TWD .00 rounding-mode-half-up sign-except-zero"
if (skeleton.indexOf("sign-accounting-except-zero") >= 0 ||
skeleton.indexOf("sign-except-zero") >= 0) {
- return ReadOnlyRoots(isolate).except_zero_string_handle();
+ return ReadOnlyRoots(isolate).exceptZero_string_handle();
}
return ReadOnlyRoots(isolate).auto_string_handle();
}
+} // anonymous namespace
+
// Return the minimum integer digits by counting the number of '0' after
// "integer-width/+" in the skeleton.
// Ex: Return 15 for skeleton as
@@ -492,7 +466,8 @@ Handle<String> SignDisplayString(Isolate* isolate,
// 1
// 123456789012345
// Return default value as 1 if there are no "integer-width/+".
-int32_t MinimumIntegerDigitsFromSkeleton(const icu::UnicodeString& skeleton) {
+int32_t JSNumberFormat::MinimumIntegerDigitsFromSkeleton(
+ const icu::UnicodeString& skeleton) {
// count the number of 0 after "integer-width/+"
icu::UnicodeString search("integer-width/+");
int32_t index = skeleton.indexOf(search);
@@ -515,8 +490,8 @@ int32_t MinimumIntegerDigitsFromSkeleton(const icu::UnicodeString& skeleton) {
// 123
// 4567
// Set The minimum as 3 and maximum as 7.
-bool FractionDigitsFromSkeleton(const icu::UnicodeString& skeleton,
- int32_t* minimum, int32_t* maximum) {
+bool JSNumberFormat::FractionDigitsFromSkeleton(
+ const icu::UnicodeString& skeleton, int32_t* minimum, int32_t* maximum) {
icu::UnicodeString search(".");
int32_t index = skeleton.indexOf(search);
if (index < 0) return false;
@@ -542,8 +517,8 @@ bool FractionDigitsFromSkeleton(const icu::UnicodeString& skeleton,
// 12345
// 6789012
// Set The minimum as 5 and maximum as 12.
-bool SignificantDigitsFromSkeleton(const icu::UnicodeString& skeleton,
- int32_t* minimum, int32_t* maximum) {
+bool JSNumberFormat::SignificantDigitsFromSkeleton(
+ const icu::UnicodeString& skeleton, int32_t* minimum, int32_t* maximum) {
icu::UnicodeString search("@");
int32_t index = skeleton.indexOf(search);
if (index < 0) return false;
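
For illustration only (not part of this patch), a sketch of the skeleton strings these helpers parse, assuming ICU4C >= 64; the exact skeleton text is an ICU implementation detail and can vary between versions:

#include <iostream>
#include <string>

#include "unicode/numberformatter.h"

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::number::LocalizedNumberFormatter formatter =
      icu::number::NumberFormatter::withLocale("en")
          .integerWidth(icu::number::IntegerWidth::zeroFillTo(3))
          .precision(icu::number::Precision::minMaxFraction(2, 4));
  // Typically prints something like ".00## integer-width/+000"; from such a
  // skeleton MinimumIntegerDigitsFromSkeleton reads 3 (three '0' after
  // "integer-width/+") and FractionDigitsFromSkeleton reads 2 and 4.
  std::string out;
  std::cout << formatter.toSkeleton(status).toUTF8String(out) << std::endl;
  return U_SUCCESS(status) ? 0 : 1;
}
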
@@ -561,6 +536,8 @@ bool SignificantDigitsFromSkeleton(const icu::UnicodeString& skeleton,
return true;
}
+namespace {
+
// Ex: percent .### rounding-mode-half-up
// Special case for "percent"
// Ex: "measure-unit/length-kilometer per-measure-unit/duration-hour .###
@@ -630,6 +607,34 @@ std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) {
} // anonymous namespace
+icu::number::LocalizedNumberFormatter
+JSNumberFormat::SetDigitOptionsToFormatter(
+ const icu::number::LocalizedNumberFormatter& icu_number_formatter,
+ const Intl::NumberFormatDigitOptions& digit_options) {
+ icu::number::LocalizedNumberFormatter result = icu_number_formatter;
+ if (digit_options.minimum_integer_digits > 1) {
+ result = result.integerWidth(icu::number::IntegerWidth::zeroFillTo(
+ digit_options.minimum_integer_digits));
+ }
+ if (FLAG_harmony_intl_numberformat_unified) {
+    // A value of -1 for minimum_significant_digits indicates that the
+    // rounding type is "compact-rounding".
+ if (digit_options.minimum_significant_digits < 0) {
+ return result;
+ }
+ }
+ icu::number::Precision precision =
+ (digit_options.minimum_significant_digits > 0)
+ ? icu::number::Precision::minMaxSignificantDigits(
+ digit_options.minimum_significant_digits,
+ digit_options.maximum_significant_digits)
+ : icu::number::Precision::minMaxFraction(
+ digit_options.minimum_fraction_digits,
+ digit_options.maximum_fraction_digits);
+
+ return result.precision(precision);
+}
+
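
For illustration only (not part of this patch), a standalone sketch of the precision rule implemented above — significant digits, when requested, take precedence over fraction digits — assuming ICU4C >= 64:

#include <iostream>
#include <string>

#include "unicode/numberformatter.h"

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::number::LocalizedNumberFormatter formatter =
      icu::number::NumberFormatter::withLocale("en")
          .precision(icu::number::Precision::minMaxSignificantDigits(2, 3));
  std::string out;
  // With 2..3 significant digits, 1234.5 typically formats as "1,230".
  std::cout << formatter.formatDouble(1234.5, status)
                   .toString(status)
                   .toUTF8String(out)
            << std::endl;
  return U_SUCCESS(status) ? 0 : 1;
}
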
// static
// ecma402 #sec-intl.numberformat.prototype.resolvedoptions
Handle<JSObject> JSNumberFormat::ResolvedOptions(
@@ -642,9 +647,6 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
icu::UnicodeString skeleton = icu_number_formatter->toSkeleton(status);
CHECK(U_SUCCESS(status));
- std::string s_str;
- s_str = skeleton.toUTF8String<std::string>(s_str);
-
// 4. Let options be ! ObjectCreate(%ObjectPrototype%).
Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
@@ -680,7 +682,7 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
Just(kDontThrow))
.FromJust());
}
- Style style = StyleFromSkeleton(skeleton);
+ JSNumberFormat::Style style = number_format->style();
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->style_string(),
StyleAsString(isolate, style), Just(kDontThrow))
@@ -706,15 +708,15 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
}
if (FLAG_harmony_intl_numberformat_unified) {
- std::string unit = UnitFromSkeleton(skeleton);
- if (!unit.empty()) {
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->unit_string(),
- isolate->factory()->NewStringFromAsciiChecked(unit.c_str()),
- Just(kDontThrow))
- .FromJust());
- }
- if (style == Style::UNIT || style == Style::PERCENT) {
+ if (style == JSNumberFormat::Style::UNIT) {
+ std::string unit = UnitFromSkeleton(skeleton);
+ if (!unit.empty()) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->unit_string(),
+ isolate->factory()->NewStringFromAsciiChecked(unit.c_str()),
+ Just(kDontThrow))
+ .FromJust());
+ }
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->unitDisplay_string(),
UnitDisplayString(isolate, skeleton), Just(kDontThrow))
@@ -827,10 +829,10 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::UnwrapNumberFormat(
}
// static
-MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
- Isolate* isolate, Handle<JSNumberFormat> number_format,
- Handle<Object> locales, Handle<Object> options_obj) {
- number_format->set_flags(0);
+MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
+ Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> options_obj) {
Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
@@ -898,7 +900,6 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
// 9. Set numberFormat.[[Locale]] to r.[[locale]].
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- number_format->set_locale(*locale_str);
// 11. Let dataLocale be r.[[dataLocale]].
@@ -911,17 +912,19 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
const char* service = "Intl.NumberFormat";
std::vector<const char*> style_str_values({"decimal", "percent", "currency"});
- std::vector<Style> style_enum_values(
- {Style::DECIMAL, Style::PERCENT, Style::CURRENCY});
+ std::vector<JSNumberFormat::Style> style_enum_values(
+ {JSNumberFormat::Style::DECIMAL, JSNumberFormat::Style::PERCENT,
+ JSNumberFormat::Style::CURRENCY});
if (FLAG_harmony_intl_numberformat_unified) {
style_str_values.push_back("unit");
- style_enum_values.push_back(Style::UNIT);
+ style_enum_values.push_back(JSNumberFormat::Style::UNIT);
}
- Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
- isolate, options, "style", service, style_str_values, style_enum_values,
- Style::DECIMAL);
+ Maybe<JSNumberFormat::Style> maybe_style =
+ Intl::GetStringOption<JSNumberFormat::Style>(
+ isolate, options, "style", service, style_str_values,
+ style_enum_values, JSNumberFormat::Style::DECIMAL);
MAYBE_RETURN(maybe_style, MaybeHandle<JSNumberFormat>());
- Style style = maybe_style.FromJust();
+ JSNumberFormat::Style style = maybe_style.FromJust();
// 13. Set numberFormat.[[Style]] to style.
@@ -952,14 +955,14 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
// 16. If style is "currency" and currency is undefined, throw a TypeError
// exception.
- if (style == Style::CURRENCY && !found_currency.FromJust()) {
+ if (style == JSNumberFormat::Style::CURRENCY && !found_currency.FromJust()) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kCurrencyCode),
JSNumberFormat);
}
// 17. If style is "currency", then
int c_digits = 0;
icu::UnicodeString currency_ustr;
- if (style == Style::CURRENCY) {
+ if (style == JSNumberFormat::Style::CURRENCY) {
// a. Let currency be the result of converting currency to upper case as
// specified in 6.1
std::transform(currency.begin(), currency.end(), currency.begin(), toupper);
@@ -975,7 +978,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
std::vector<CurrencyDisplay> currency_display_enum_values(
{CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL, CurrencyDisplay::NAME});
if (FLAG_harmony_intl_numberformat_unified) {
- currency_display_str_values.push_back("narrow-symbol");
+ currency_display_str_values.push_back("narrowSymbol");
currency_display_enum_values.push_back(CurrencyDisplay::NARROW_SYMBOL);
}
Maybe<CurrencyDisplay> maybe_currency_display =
@@ -1020,13 +1023,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
MAYBE_RETURN(maybe_unit_display, MaybeHandle<JSNumberFormat>());
UnitDisplay unit_display = maybe_unit_display.FromJust();
- // If style is "percent", then
- if (style == Style::PERCENT) {
- // Let unit be "concentr-percent".
- unit = "percent";
- }
- // If style is "unit" or "percent", then
- if (style == Style::PERCENT || style == Style::UNIT) {
+ // If style is "unit", then
+ if (style == JSNumberFormat::Style::UNIT) {
// If unit is undefined, throw a TypeError exception.
if (unit == "") {
THROW_NEW_ERROR(
@@ -1070,12 +1068,12 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
}
}
- if (style == Style::PERCENT) {
+ if (style == JSNumberFormat::Style::PERCENT) {
icu_number_formatter = icu_number_formatter.unit(icu::NoUnit::percent())
.scale(icu::number::Scale::powerOfTen(2));
}
- if (style == Style::CURRENCY) {
+ if (style == JSNumberFormat::Style::CURRENCY) {
// 19. If style is "currency", set numberFormat.[[CurrencyDisplay]] to
// currencyDisplay.
@@ -1099,19 +1097,19 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
}
}
- // 20. If style is "currency", then
+ // 23. If style is "currency", then
int mnfd_default, mxfd_default;
- if (style == Style::CURRENCY) {
+ if (style == JSNumberFormat::Style::CURRENCY) {
// a. Let mnfdDefault be cDigits.
// b. Let mxfdDefault be cDigits.
mnfd_default = c_digits;
mxfd_default = c_digits;
+ // 24. Else,
} else {
- // 21. Else,
// a. Let mnfdDefault be 0.
mnfd_default = 0;
// b. If style is "percent", then
- if (style == Style::PERCENT) {
+ if (style == JSNumberFormat::Style::PERCENT) {
// i. Let mxfdDefault be 0.
mxfd_default = 0;
} else {
@@ -1120,51 +1118,11 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
mxfd_default = 3;
}
}
- // 22. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
- // mnfdDefault, mxfdDefault).
- Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
- Intl::SetNumberFormatDigitOptions(isolate, options, mnfd_default,
- mxfd_default);
- MAYBE_RETURN(maybe_digit_options, Handle<JSNumberFormat>());
- Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
-
- icu::number::Precision precision =
- (digit_options.minimum_significant_digits > 0)
- ? icu::number::Precision::minMaxSignificantDigits(
- digit_options.minimum_significant_digits,
- digit_options.maximum_significant_digits)
- : icu::number::Precision::minMaxFraction(
- digit_options.minimum_fraction_digits,
- digit_options.maximum_fraction_digits);
-
- if (digit_options.minimum_significant_digits > 0) {
- // Currenct ECMA 402 spec mandate to record (Min|Max)imumFractionDigits
- // uncondictionally while the unified number proposal eventually will only
- // record either (Min|Max)imumFractionDigits or
- // (Min|Max)imumSignaficantDigits Since LocalizedNumberFormatter can only
- // remember one set, and during 2019-1-17 ECMA402 meeting that the committee
- // decide not to take a PR to address that prior to the unified number
- // proposal, we have to add these two 5 bits int into flags to remember the
- // (Min|Max)imumFractionDigits while (Min|Max)imumSignaficantDigits is
- // present.
- // TODO(ftang) remove the following two lines once we ship
- // int-number-format-unified
- number_format->set_minimum_fraction_digits(
- digit_options.minimum_fraction_digits);
- number_format->set_maximum_fraction_digits(
- digit_options.maximum_fraction_digits);
- }
-
- icu_number_formatter = icu_number_formatter.precision(precision);
- if (digit_options.minimum_integer_digits > 1) {
- icu_number_formatter =
- icu_number_formatter.integerWidth(icu::number::IntegerWidth::zeroFillTo(
- digit_options.minimum_integer_digits));
- }
+ Notation notation = Notation::STANDARD;
if (FLAG_harmony_intl_numberformat_unified) {
- // Let notation be ? GetOption(options, "notation", "string", « "standard",
- // "scientific", "engineering", "compact" », "standard").
+ // 25. Let notation be ? GetOption(options, "notation", "string", «
+ // "standard", "scientific", "engineering", "compact" », "standard").
Maybe<Notation> maybe_notation = Intl::GetStringOption<Notation>(
isolate, options, "notation", service,
{"standard", "scientific", "engineering", "compact"},
@@ -1172,10 +1130,23 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
Notation::COMPACT},
Notation::STANDARD);
MAYBE_RETURN(maybe_notation, MaybeHandle<JSNumberFormat>());
- Notation notation = maybe_notation.FromJust();
+ notation = maybe_notation.FromJust();
+ }
+
+ // 27. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
+ // mnfdDefault, mxfdDefault).
+ Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
+ Intl::SetNumberFormatDigitOptions(isolate, options, mnfd_default,
+ mxfd_default,
+ notation == Notation::COMPACT);
+ MAYBE_RETURN(maybe_digit_options, Handle<JSNumberFormat>());
+ Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
+ icu_number_formatter = JSNumberFormat::SetDigitOptionsToFormatter(
+ icu_number_formatter, digit_options);
- // Let compactDisplay be ? GetOption(options, "compactDisplay", "string", «
- // "short", "long" », "short").
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // 28. Let compactDisplay be ? GetOption(options, "compactDisplay",
+ // "string", « "short", "long" », "short").
Maybe<CompactDisplay> maybe_compact_display =
Intl::GetStringOption<CompactDisplay>(
isolate, options, "compactDisplay", service, {"short", "long"},
@@ -1184,6 +1155,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>());
CompactDisplay compact_display = maybe_compact_display.FromJust();
+ // 26. Set numberFormat.[[Notation]] to notation.
    // The default notation in ICU is Simple, which is mapped from STANDARD,
// so we can skip setting it.
if (notation != Notation::STANDARD) {
@@ -1191,30 +1163,31 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
ToICUNotation(notation, compact_display));
}
}
- // 23. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
+ // 30. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
// undefined, true).
bool use_grouping = true;
Maybe<bool> found_use_grouping = Intl::GetBoolOption(
isolate, options, "useGrouping", service, &use_grouping);
MAYBE_RETURN(found_use_grouping, MaybeHandle<JSNumberFormat>());
- // 24. Set numberFormat.[[UseGrouping]] to useGrouping.
+ // 31. Set numberFormat.[[UseGrouping]] to useGrouping.
if (!use_grouping) {
icu_number_formatter = icu_number_formatter.grouping(
UNumberGroupingStrategy::UNUM_GROUPING_OFF);
}
if (FLAG_harmony_intl_numberformat_unified) {
- // Let signDisplay be ? GetOption(options, "signDisplay", "string", «
- // "auto", "never", "always", "except-zero" », "auto").
+ // 32. Let signDisplay be ? GetOption(options, "signDisplay", "string", «
+ // "auto", "never", "always", "exceptZero" », "auto").
Maybe<SignDisplay> maybe_sign_display = Intl::GetStringOption<SignDisplay>(
isolate, options, "signDisplay", service,
- {"auto", "never", "always", "except-zero"},
+ {"auto", "never", "always", "exceptZero"},
{SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
SignDisplay::EXCEPT_ZERO},
SignDisplay::AUTO);
MAYBE_RETURN(maybe_sign_display, MaybeHandle<JSNumberFormat>());
SignDisplay sign_display = maybe_sign_display.FromJust();
+ // 33. Set numberFormat.[[SignDisplay]] to signDisplay.
    // The default sign in ICU is UNUM_SIGN_AUTO, which is mapped from
    // SignDisplay::AUTO and CurrencySign::STANDARD, so we can skip setting
    // it for those values as an optimization.
@@ -1244,6 +1217,33 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
Managed<icu::number::LocalizedNumberFormatter>::FromRawPtr(
isolate, 0,
new icu::number::LocalizedNumberFormatter(icu_number_formatter));
+
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSNumberFormat> number_format = Handle<JSNumberFormat>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ number_format->set_flags(0);
+ number_format->set_style(style);
+ number_format->set_locale(*locale_str);
+
+ if (digit_options.minimum_significant_digits > 0) {
+ // The current ECMA 402 spec mandates recording (Min|Max)imumFractionDigits
+ // unconditionally, while the unified number proposal eventually will only
+ // record either (Min|Max)imumFractionDigits or
+ // (Min|Max)imumSignificantDigits. Since LocalizedNumberFormatter can only
+    // remember one set, and during the 2019-01-17 ECMA-402 meeting the
+    // committee decided not to take a PR to address that prior to the unified
+    // number proposal, we have to add these two 5-bit ints into flags to
+    // remember the
+ // (Min|Max)imumFractionDigits while (Min|Max)imumSignificantDigits is
+ // present.
+ // TODO(ftang) remove the following two lines once we ship
+ // int-number-format-unified
+ number_format->set_minimum_fraction_digits(
+ digit_options.minimum_fraction_digits);
+ number_format->set_maximum_fraction_digits(
+ digit_options.maximum_fraction_digits);
+ }
+
number_format->set_icu_number_formatter(*managed_number_formatter);
number_format->set_bound_format(*factory->undefined_value());
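
For illustration only (not part of this patch), a minimal sketch, using stand-in types rather than real V8 classes, of the construction order New() follows above: every step that can fail or run observable code happens first, and the result object is only allocated and initialized afterwards:

#include <optional>
#include <string>

struct Options {
  std::string style;
  int minimum_fraction_digits = 0;
};

struct FakeNumberFormat {
  std::string style;
  int minimum_fraction_digits = 0;
};

std::optional<FakeNumberFormat> NewNumberFormat(const Options& options) {
  // 1. Validate and derive all state first; failures return early while no
  //    partially initialized object exists (compare MAYBE_RETURN above).
  if (options.style != "decimal" && options.style != "percent" &&
      options.style != "currency" && options.style != "unit") {
    return std::nullopt;
  }
  if (options.minimum_fraction_digits < 0 ||
      options.minimum_fraction_digits > 20) {
    return std::nullopt;
  }
  // 2. Only now materialize the result and copy in the precomputed state
  //    (compare NewFastOrSlowJSObjectFromMap plus the setters above).
  FakeNumberFormat result;
  result.style = options.style;
  result.minimum_fraction_digits = options.minimum_fraction_digits;
  return result;
}

int main() {
  Options options;
  options.style = "currency";
  options.minimum_fraction_digits = 2;
  return NewNumberFormat(options).has_value() ? 0 : 1;
}
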
@@ -1417,7 +1417,7 @@ namespace {
Maybe<int> ConstructParts(Isolate* isolate, const icu::UnicodeString& formatted,
icu::FieldPositionIterator* fp_iter,
Handle<JSArray> result, int start_index,
- Handle<Object> numeric_obj, Handle<String> unit) {
+ Handle<Object> numeric_obj, bool style_is_unit) {
DCHECK(numeric_obj->IsNumeric());
int32_t length = formatted.length();
int index = start_index;
@@ -1442,21 +1442,23 @@ Maybe<int> ConstructParts(Isolate* isolate, const icu::UnicodeString& formatted,
for (auto it = parts.begin(); it < parts.end(); it++) {
NumberFormatSpan part = *it;
- Handle<String> field_type_string =
- part.field_id == -1
- ? isolate->factory()->literal_string()
- : Intl::NumberFieldToType(isolate, numeric_obj, part.field_id);
+ Handle<String> field_type_string = isolate->factory()->literal_string();
+ if (part.field_id != -1) {
+ if (style_is_unit && static_cast<UNumberFormatFields>(part.field_id) ==
+ UNUM_PERCENT_FIELD) {
+            // Special case: when the style is "unit", ICU reports the percent
+            // sign as a percent field, but ECMA-402 expects a "unit" part.
+ field_type_string = isolate->factory()->unit_string();
+ } else {
+ field_type_string =
+ Intl::NumberFieldToType(isolate, numeric_obj, part.field_id);
+ }
+ }
Handle<String> substring;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, substring,
Intl::ToString(isolate, formatted, part.begin_pos, part.end_pos),
Nothing<int>());
- if (unit.is_null()) {
- Intl::AddElement(isolate, result, index, field_type_string, substring);
- } else {
- Intl::AddElement(isolate, result, index, field_type_string, substring,
- isolate->factory()->unit_string(), unit);
- }
+ Intl::AddElement(isolate, result, index, field_type_string, substring);
++index;
}
JSObject::ValidateElements(*result);
@@ -1480,16 +1482,26 @@ MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
MAYBE_RETURN(maybe_format, Handle<JSArray>());
Handle<JSArray> result = factory->NewJSArray(0);
- Maybe<int> maybe_format_to_parts =
- ConstructParts(isolate, maybe_format.FromJust(), &fp_iter, result, 0,
- numeric_obj, Handle<String>());
+ Maybe<int> maybe_format_to_parts = ConstructParts(
+ isolate, maybe_format.FromJust(), &fp_iter, result, 0, numeric_obj,
+ number_format->style() == JSNumberFormat::Style::UNIT);
MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
return result;
}
+namespace {
+
+struct CheckNumberElements {
+ static const char* key() { return "NumberElements"; }
+ static const char* path() { return nullptr; }
+};
+
+} // namespace
+
const std::set<std::string>& JSNumberFormat::GetAvailableLocales() {
- static base::LazyInstance<Intl::AvailableLocales<icu::NumberFormat>>::type
+ static base::LazyInstance<
+ Intl::AvailableLocales<icu::NumberFormat, CheckNumberElements>>::type
available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 6c59e76f7a..2979ab10f4 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -17,14 +17,15 @@
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
#include "src/objects/objects.h"
-#include "unicode/numberformatter.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace U_ICU_NAMESPACE {
-class NumberFormat;
class UnicodeString;
+namespace number {
+class LocalizedNumberFormatter;
+} // namespace number
} // namespace U_ICU_NAMESPACE
namespace v8 {
@@ -33,9 +34,9 @@ namespace internal {
class JSNumberFormat : public JSObject {
public:
// ecma402/#sec-initializenumberformat
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> Initialize(
- Isolate* isolate, Handle<JSNumberFormat> number_format,
- Handle<Object> locales, Handle<Object> options);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options);
// ecma402/#sec-unwrapnumberformat
V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> UnwrapNumberFormat(
@@ -56,6 +57,17 @@ class JSNumberFormat : public JSObject {
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
+ // Helper functions shared with JSPluralRules.
+ static int32_t MinimumIntegerDigitsFromSkeleton(
+ const icu::UnicodeString& skeleton);
+ static bool FractionDigitsFromSkeleton(const icu::UnicodeString& skeleton,
+ int32_t* minimum, int32_t* maximum);
+ static bool SignificantDigitsFromSkeleton(const icu::UnicodeString& skeleton,
+ int32_t* minimum, int32_t* maximum);
+ static icu::number::LocalizedNumberFormatter SetDigitOptionsToFormatter(
+ const icu::number::LocalizedNumberFormatter& icu_number_formatter,
+ const Intl::NumberFormatDigitOptions& digit_options);
+
DECL_CAST(JSNumberFormat)
DECL_PRINTER(JSNumberFormat)
DECL_VERIFIER(JSNumberFormat)
@@ -80,6 +92,14 @@ class JSNumberFormat : public JSObject {
inline int maximum_fraction_digits() const;
inline void set_maximum_fraction_digits(int digits);
+ // [[Style]] is one of the values "decimal", "percent", "currency",
+ // or "unit" identifying the style of the number format.
+ // Note: "unit" is added in proposal-unified-intl-numberformat
+ enum class Style { DECIMAL, PERCENT, CURRENCY, UNIT };
+
+ inline void set_style(Style style);
+ inline Style style() const;
+
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JSNUMBER_FORMAT_FIELDS)
@@ -87,13 +107,18 @@ class JSNumberFormat : public JSObject {
// Bit positions in |flags|.
#define FLAGS_BIT_FIELDS(V, _) \
V(MinimumFractionDigitsBits, int, 5, _) \
- V(MaximumFractionDigitsBits, int, 5, _)
+ V(MaximumFractionDigitsBits, int, 5, _) \
+ V(StyleBits, Style, 2, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
STATIC_ASSERT(20 <= MinimumFractionDigitsBits::kMax);
STATIC_ASSERT(20 <= MaximumFractionDigitsBits::kMax);
+ STATIC_ASSERT(Style::DECIMAL <= StyleBits::kMax);
+ STATIC_ASSERT(Style::PERCENT <= StyleBits::kMax);
+ STATIC_ASSERT(Style::CURRENCY <= StyleBits::kMax);
+ STATIC_ASSERT(Style::UNIT <= StyleBits::kMax);
DECL_ACCESSORS(locale, String)
DECL_ACCESSORS(icu_number_formatter,
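
For illustration only (not part of this patch), a self-contained sketch of what the generated accessors amount to: the two 5-bit fraction-digit counts and the 2-bit style are packed into the single flags word declared above (the shifts below assume the fields are laid out in declaration order):

#include <cassert>
#include <cstdint>

enum class Style { DECIMAL, PERCENT, CURRENCY, UNIT };

// Widths mirror the FLAGS_BIT_FIELDS list above.
constexpr uint32_t kMinFractionDigitsShift = 0;   // 5 bits
constexpr uint32_t kMaxFractionDigitsShift = 5;   // 5 bits
constexpr uint32_t kStyleShift = 10;              // 2 bits
constexpr uint32_t kFiveBitMask = (1u << 5) - 1;
constexpr uint32_t kTwoBitMask = (1u << 2) - 1;

constexpr uint32_t EncodeFlags(int min_fd, int max_fd, Style style) {
  return (static_cast<uint32_t>(min_fd) << kMinFractionDigitsShift) |
         (static_cast<uint32_t>(max_fd) << kMaxFractionDigitsShift) |
         (static_cast<uint32_t>(style) << kStyleShift);
}

int main() {
  uint32_t flags = EncodeFlags(2, 20, Style::CURRENCY);
  assert(((flags >> kMinFractionDigitsShift) & kFiveBitMask) == 2);
  assert(((flags >> kMaxFractionDigitsShift) & kFiveBitMask) == 20);
  assert(static_cast<Style>((flags >> kStyleShift) & kTwoBitMask) ==
         Style::CURRENCY);
  return 0;
}
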
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 6b7a7d72f0..10672d4443 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -8,6 +8,7 @@
#include "src/objects/js-objects.h"
#include "src/heap/heap-write-barrier.h"
+#include "src/objects/elements.h"
#include "src/objects/embedder-data-slot-inl.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/feedback-vector.h"
@@ -29,17 +30,17 @@ namespace v8 {
namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(JSObject, JSReceiver)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject)
OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator, JSObject)
OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction, JSObject)
OBJECT_CONSTRUCTORS_IMPL(JSDate, JSObject)
OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSObject)
OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy)
JSIteratorResult::JSIteratorResult(Address ptr) : JSObject(ptr) {}
OBJECT_CONSTRUCTORS_IMPL(JSMessageObject, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSPrimitiveWrapper)
OBJECT_CONSTRUCTORS_IMPL(JSStringIterator, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSValue, JSObject)
NEVER_READ_ONLY_SPACE_IMPL(JSReceiver)
@@ -48,13 +49,10 @@ CAST_ACCESSOR(JSBoundFunction)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(JSGlobalObject)
-CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSIteratorResult)
CAST_ACCESSOR(JSMessageObject)
-CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSStringIterator)
-CAST_ACCESSOR(JSValue)
MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
Handle<JSReceiver> receiver,
@@ -130,11 +128,6 @@ bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject object) {
ACCESSORS(JSReceiver, raw_properties_or_hash, Object, kPropertiesOrHashOffset)
-FixedArrayBase JSObject::elements() const {
- Object array = READ_FIELD(*this, kElementsOffset);
- return FixedArrayBase::cast(array);
-}
-
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
JSObject::ValidateElements(*object);
ElementsKind elements_kind = object->map().elements_kind();
@@ -225,39 +218,34 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
void JSObject::SetMapAndElements(Handle<JSObject> object, Handle<Map> new_map,
Handle<FixedArrayBase> value) {
- JSObject::MigrateToMap(object, new_map);
+ Isolate* isolate = object->GetIsolate();
+ JSObject::MigrateToMap(isolate, object, new_map);
DCHECK((object->map().has_fast_smi_or_object_elements() ||
- (*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
+ (*value == ReadOnlyRoots(isolate).empty_fixed_array()) ||
object->map().has_fast_string_wrapper_elements()) ==
- (value->map() == object->GetReadOnlyRoots().fixed_array_map() ||
- value->map() == object->GetReadOnlyRoots().fixed_cow_array_map()));
- DCHECK((*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
+ (value->map() == ReadOnlyRoots(isolate).fixed_array_map() ||
+ value->map() == ReadOnlyRoots(isolate).fixed_cow_array_map()));
+ DCHECK((*value == ReadOnlyRoots(isolate).empty_fixed_array()) ||
(object->map().has_fast_double_elements() ==
value->IsFixedDoubleArray()));
object->set_elements(*value);
}
-void JSObject::set_elements(FixedArrayBase value, WriteBarrierMode mode) {
- WRITE_FIELD(*this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kElementsOffset, value, mode);
-}
-
void JSObject::initialize_elements() {
FixedArrayBase elements = map().GetInitialElements();
- WRITE_FIELD(*this, kElementsOffset, elements);
+ set_elements(elements, SKIP_WRITE_BARRIER);
}
-InterceptorInfo JSObject::GetIndexedInterceptor() {
- return map().GetIndexedInterceptor();
+DEF_GETTER(JSObject, GetIndexedInterceptor, InterceptorInfo) {
+ return map(isolate).GetIndexedInterceptor(isolate);
}
-InterceptorInfo JSObject::GetNamedInterceptor() {
- return map().GetNamedInterceptor();
+DEF_GETTER(JSObject, GetNamedInterceptor, InterceptorInfo) {
+ return map(isolate).GetNamedInterceptor(isolate);
}
-int JSObject::GetHeaderSize() const { return GetHeaderSize(map()); }
-
-int JSObject::GetHeaderSize(const Map map) {
+// static
+int JSObject::GetHeaderSize(Map map) {
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
@@ -268,7 +256,7 @@ int JSObject::GetHeaderSize(const Map map) {
}
// static
-int JSObject::GetEmbedderFieldsStartOffset(const Map map) {
+int JSObject::GetEmbedderFieldsStartOffset(Map map) {
// Embedder fields are located after the object header.
return GetHeaderSize(map);
}
@@ -278,7 +266,7 @@ int JSObject::GetEmbedderFieldsStartOffset() {
}
// static
-int JSObject::GetEmbedderFieldCount(const Map map) {
+int JSObject::GetEmbedderFieldCount(Map map) {
int instance_size = map.instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
// Embedder fields are located after the object header, whereas in-object
@@ -314,29 +302,39 @@ void JSObject::SetEmbedderField(int index, Smi value) {
EmbedderDataSlot(*this, index).store_smi(value);
}
-bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
+bool JSObject::IsUnboxedDoubleField(FieldIndex index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return IsUnboxedDoubleField(isolate, index);
+}
+
+bool JSObject::IsUnboxedDoubleField(Isolate* isolate, FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
- return map().IsUnboxedDoubleField(index);
+ return map(isolate).IsUnboxedDoubleField(isolate, index);
}
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
-Object JSObject::RawFastPropertyAt(FieldIndex index) {
- DCHECK(!IsUnboxedDoubleField(index));
+Object JSObject::RawFastPropertyAt(FieldIndex index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return RawFastPropertyAt(isolate, index);
+}
+
+Object JSObject::RawFastPropertyAt(Isolate* isolate, FieldIndex index) const {
+ DCHECK(!IsUnboxedDoubleField(isolate, index));
if (index.is_inobject()) {
- return READ_FIELD(*this, index.offset());
+ return TaggedField<Object>::load(isolate, *this, index.offset());
} else {
- return property_array().get(index.outobject_array_index());
+ return property_array(isolate).get(isolate, index.outobject_array_index());
}
}
-double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
+double JSObject::RawFastDoublePropertyAt(FieldIndex index) const {
DCHECK(IsUnboxedDoubleField(index));
return ReadField<double>(index.offset());
}
-uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) {
+uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) const {
DCHECK(IsUnboxedDoubleField(index));
return ReadField<uint64_t>(index.offset());
}
@@ -417,7 +415,7 @@ int JSObject::GetInObjectPropertyOffset(int index) {
Object JSObject::InObjectPropertyAt(int index) {
int offset = GetInObjectPropertyOffset(index);
- return READ_FIELD(*this, offset);
+ return TaggedField<Object>::load(*this, offset);
}
Object JSObject::InObjectPropertyAtPut(int index, Object value,
@@ -452,10 +450,6 @@ void JSObject::InitializeBody(Map map, int start_offset,
}
}
-Object JSBoundFunction::raw_bound_target_function() const {
- return READ_FIELD(*this, kBoundTargetFunctionOffset);
-}
-
ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
kBoundTargetFunctionOffset)
ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
@@ -466,8 +460,6 @@ ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
ACCESSORS(JSGlobalObject, native_context, NativeContext, kNativeContextOffset)
ACCESSORS(JSGlobalObject, global_proxy, JSGlobalProxy, kGlobalProxyOffset)
-ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
-
FeedbackVector JSFunction::feedback_vector() const {
DCHECK(has_feedback_vector());
return FeedbackVector::cast(raw_feedback_cell().value());
@@ -564,7 +556,8 @@ void JSFunction::set_code_no_write_barrier(Code value) {
RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
}
-SharedFunctionInfo JSFunction::shared() const {
+// TODO(ishell): Why relaxed read but release store?
+DEF_GETTER(JSFunction, shared, SharedFunctionInfo) {
return SharedFunctionInfo::cast(
RELAXED_READ_FIELD(*this, kSharedFunctionInfoOffset));
}
@@ -606,11 +599,11 @@ bool JSFunction::has_closure_feedback_cell_array() const {
}
Context JSFunction::context() {
- return Context::cast(READ_FIELD(*this, kContextOffset));
+ return TaggedField<Context, kContextOffset>::load(*this);
}
bool JSFunction::has_context() const {
- return READ_FIELD(*this, kContextOffset).IsContext();
+ return TaggedField<HeapObject, kContextOffset>::load(*this).IsContext();
}
JSGlobalProxy JSFunction::global_proxy() { return context().global_proxy(); }
@@ -619,65 +612,73 @@ NativeContext JSFunction::native_context() {
return context().native_context();
}
-void JSFunction::set_context(Object value) {
+void JSFunction::set_context(HeapObject value) {
DCHECK(value.IsUndefined() || value.IsContext());
WRITE_FIELD(*this, kContextOffset, value);
WRITE_BARRIER(*this, kContextOffset, value);
}
-ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
+ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, HeapObject,
kPrototypeOrInitialMapOffset, map().has_prototype_slot())
-bool JSFunction::has_prototype_slot() const {
- return map().has_prototype_slot();
+DEF_GETTER(JSFunction, has_prototype_slot, bool) {
+ return map(isolate).has_prototype_slot();
}
-Map JSFunction::initial_map() { return Map::cast(prototype_or_initial_map()); }
+DEF_GETTER(JSFunction, initial_map, Map) {
+ return Map::cast(prototype_or_initial_map(isolate));
+}
-bool JSFunction::has_initial_map() {
- DCHECK(has_prototype_slot());
- return prototype_or_initial_map().IsMap();
+DEF_GETTER(JSFunction, has_initial_map, bool) {
+ DCHECK(has_prototype_slot(isolate));
+ return prototype_or_initial_map(isolate).IsMap(isolate);
}
-bool JSFunction::has_instance_prototype() {
- DCHECK(has_prototype_slot());
- return has_initial_map() || !prototype_or_initial_map().IsTheHole();
+DEF_GETTER(JSFunction, has_instance_prototype, bool) {
+ DCHECK(has_prototype_slot(isolate));
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ return has_initial_map(isolate) ||
+ !prototype_or_initial_map(isolate).IsTheHole(
+ GetReadOnlyRoots(isolate));
}
-bool JSFunction::has_prototype() {
- DCHECK(has_prototype_slot());
- return map().has_non_instance_prototype() || has_instance_prototype();
+DEF_GETTER(JSFunction, has_prototype, bool) {
+ DCHECK(has_prototype_slot(isolate));
+ return map(isolate).has_non_instance_prototype() ||
+ has_instance_prototype(isolate);
}
-bool JSFunction::has_prototype_property() {
- return (has_prototype_slot() && IsConstructor()) ||
- IsGeneratorFunction(shared().kind());
+DEF_GETTER(JSFunction, has_prototype_property, bool) {
+ return (has_prototype_slot(isolate) && IsConstructor(isolate)) ||
+ IsGeneratorFunction(shared(isolate).kind());
}
-bool JSFunction::PrototypeRequiresRuntimeLookup() {
- return !has_prototype_property() || map().has_non_instance_prototype();
+DEF_GETTER(JSFunction, PrototypeRequiresRuntimeLookup, bool) {
+ return !has_prototype_property(isolate) ||
+ map(isolate).has_non_instance_prototype();
}
-HeapObject JSFunction::instance_prototype() {
- DCHECK(has_instance_prototype());
- if (has_initial_map()) return initial_map().prototype();
+DEF_GETTER(JSFunction, instance_prototype, HeapObject) {
+ DCHECK(has_instance_prototype(isolate));
+ if (has_initial_map(isolate)) return initial_map(isolate).prototype(isolate);
// When there is no initial map and the prototype is a JSReceiver, the
// initial map field is used for the prototype field.
- return HeapObject::cast(prototype_or_initial_map());
+ return HeapObject::cast(prototype_or_initial_map(isolate));
}
-Object JSFunction::prototype() {
- DCHECK(has_prototype());
+DEF_GETTER(JSFunction, prototype, Object) {
+ DCHECK(has_prototype(isolate));
// If the function's prototype property has been set to a non-JSReceiver
// value, that value is stored in the constructor field of the map.
- if (map().has_non_instance_prototype()) {
- Object prototype = map().GetConstructor();
+ if (map(isolate).has_non_instance_prototype()) {
+ Object prototype = map(isolate).GetConstructor(isolate);
// The map must have a prototype in that field, not a back pointer.
- DCHECK(!prototype.IsMap());
- DCHECK(!prototype.IsFunctionTemplateInfo());
+ DCHECK(!prototype.IsMap(isolate));
+ DCHECK(!prototype.IsFunctionTemplateInfo(isolate));
return prototype;
}
- return instance_prototype();
+ return instance_prototype(isolate);
}
bool JSFunction::is_compiled() const {
@@ -711,8 +712,6 @@ void JSFunction::ResetIfBytecodeFlushed() {
}
}
-ACCESSORS(JSValue, value, Object, kValueOffset)
-
ACCESSORS(JSDate, value, Object, kValueOffset)
ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
ACCESSORS(JSDate, year, Object, kYearOffset)
@@ -738,12 +737,11 @@ int JSMessageObject::GetEndPosition() const {
}
MessageTemplate JSMessageObject::type() const {
- Object value = READ_FIELD(*this, kMessageTypeOffset);
- return MessageTemplateFromInt(Smi::ToInt(value));
+ return MessageTemplateFromInt(raw_type());
}
void JSMessageObject::set_type(MessageTemplate value) {
- WRITE_FIELD(*this, kMessageTypeOffset, Smi::FromInt(static_cast<int>(value)));
+ set_raw_type(static_cast<int>(value));
}
ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
@@ -754,143 +752,154 @@ ACCESSORS(JSMessageObject, bytecode_offset, Smi, kBytecodeOffsetOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
+SMI_ACCESSORS(JSMessageObject, raw_type, kMessageTypeOffset)
-ElementsKind JSObject::GetElementsKind() const {
- ElementsKind kind = map().elements_kind();
+DEF_GETTER(JSObject, GetElementsKind, ElementsKind) {
+ ElementsKind kind = map(isolate).elements_kind();
#if VERIFY_HEAP && DEBUG
- FixedArrayBase fixed_array =
- FixedArrayBase::unchecked_cast(READ_FIELD(*this, kElementsOffset));
+ FixedArrayBase fixed_array = FixedArrayBase::unchecked_cast(
+ TaggedField<HeapObject, kElementsOffset>::load(isolate, *this));
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
- if (ElementsAreSafeToExamine()) {
- Map map = fixed_array.map();
+ if (ElementsAreSafeToExamine(isolate)) {
+ Map map = fixed_array.map(isolate);
if (IsSmiOrObjectElementsKind(kind)) {
- DCHECK(map == GetReadOnlyRoots().fixed_array_map() ||
- map == GetReadOnlyRoots().fixed_cow_array_map());
+ DCHECK(map == GetReadOnlyRoots(isolate).fixed_array_map() ||
+ map == GetReadOnlyRoots(isolate).fixed_cow_array_map());
} else if (IsDoubleElementsKind(kind)) {
- DCHECK(fixed_array.IsFixedDoubleArray() ||
- fixed_array == GetReadOnlyRoots().empty_fixed_array());
+ DCHECK(fixed_array.IsFixedDoubleArray(isolate) ||
+ fixed_array == GetReadOnlyRoots(isolate).empty_fixed_array());
} else if (kind == DICTIONARY_ELEMENTS) {
- DCHECK(fixed_array.IsFixedArray());
- DCHECK(fixed_array.IsNumberDictionary());
+ DCHECK(fixed_array.IsFixedArray(isolate));
+ DCHECK(fixed_array.IsNumberDictionary(isolate));
} else {
DCHECK(kind > DICTIONARY_ELEMENTS || IsFrozenOrSealedElementsKind(kind));
}
- DCHECK(!IsSloppyArgumentsElementsKind(kind) ||
- (elements().IsFixedArray() && elements().length() >= 2));
+ DCHECK(
+ !IsSloppyArgumentsElementsKind(kind) ||
+ (elements(isolate).IsFixedArray() && elements(isolate).length() >= 2));
}
#endif
return kind;
}
-bool JSObject::HasObjectElements() {
- return IsObjectElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, GetElementsAccessor, ElementsAccessor*) {
+ return ElementsAccessor::ForKind(GetElementsKind(isolate));
+}
+
+DEF_GETTER(JSObject, HasObjectElements, bool) {
+ return IsObjectElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasSmiElements() { return IsSmiElementsKind(GetElementsKind()); }
+DEF_GETTER(JSObject, HasSmiElements, bool) {
+ return IsSmiElementsKind(GetElementsKind(isolate));
+}
-bool JSObject::HasSmiOrObjectElements() {
- return IsSmiOrObjectElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasSmiOrObjectElements, bool) {
+ return IsSmiOrObjectElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasDoubleElements() {
- return IsDoubleElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasDoubleElements, bool) {
+ return IsDoubleElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasHoleyElements() {
- return IsHoleyElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasHoleyElements, bool) {
+ return IsHoleyElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasFastElements() {
- return IsFastElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasFastElements, bool) {
+ return IsFastElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasFastPackedElements() {
- return IsFastPackedElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasFastPackedElements, bool) {
+ return IsFastPackedElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasDictionaryElements() {
- return GetElementsKind() == DICTIONARY_ELEMENTS;
+DEF_GETTER(JSObject, HasDictionaryElements, bool) {
+ return GetElementsKind(isolate) == DICTIONARY_ELEMENTS;
}
-bool JSObject::HasPackedElements() {
- return GetElementsKind() == PACKED_ELEMENTS;
+DEF_GETTER(JSObject, HasPackedElements, bool) {
+ return GetElementsKind(isolate) == PACKED_ELEMENTS;
}
-bool JSObject::HasFrozenOrSealedElements() {
- return IsFrozenOrSealedElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasFrozenOrSealedElements, bool) {
+ return IsFrozenOrSealedElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasSealedElements() {
- return IsSealedElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasSealedElements, bool) {
+ return IsSealedElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasFastArgumentsElements() {
- return GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+DEF_GETTER(JSObject, HasFastArgumentsElements, bool) {
+ return GetElementsKind(isolate) == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
}
-bool JSObject::HasSlowArgumentsElements() {
- return GetElementsKind() == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
+DEF_GETTER(JSObject, HasSlowArgumentsElements, bool) {
+ return GetElementsKind(isolate) == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
}
-bool JSObject::HasSloppyArgumentsElements() {
- return IsSloppyArgumentsElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasSloppyArgumentsElements, bool) {
+ return IsSloppyArgumentsElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasStringWrapperElements() {
- return IsStringWrapperElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasStringWrapperElements, bool) {
+ return IsStringWrapperElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasFastStringWrapperElements() {
- return GetElementsKind() == FAST_STRING_WRAPPER_ELEMENTS;
+DEF_GETTER(JSObject, HasFastStringWrapperElements, bool) {
+ return GetElementsKind(isolate) == FAST_STRING_WRAPPER_ELEMENTS;
}
-bool JSObject::HasSlowStringWrapperElements() {
- return GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS;
+DEF_GETTER(JSObject, HasSlowStringWrapperElements, bool) {
+ return GetElementsKind(isolate) == SLOW_STRING_WRAPPER_ELEMENTS;
}
-bool JSObject::HasTypedArrayElements() {
- DCHECK(!elements().is_null());
- return map().has_typed_array_elements();
+DEF_GETTER(JSObject, HasTypedArrayElements, bool) {
+ DCHECK(!elements(isolate).is_null());
+ return map(isolate).has_typed_array_elements();
}
#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
- bool JSObject::HasFixed##Type##Elements() { \
- return map().elements_kind() == TYPE##_ELEMENTS; \
+ DEF_GETTER(JSObject, HasFixed##Type##Elements, bool) { \
+ return map(isolate).elements_kind() == TYPE##_ELEMENTS; \
}
TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
#undef FIXED_TYPED_ELEMENTS_CHECK
-bool JSObject::HasNamedInterceptor() { return map().has_named_interceptor(); }
+DEF_GETTER(JSObject, HasNamedInterceptor, bool) {
+ return map(isolate).has_named_interceptor();
+}
-bool JSObject::HasIndexedInterceptor() {
- return map().has_indexed_interceptor();
+DEF_GETTER(JSObject, HasIndexedInterceptor, bool) {
+ return map(isolate).has_indexed_interceptor();
}
-void JSGlobalObject::set_global_dictionary(GlobalDictionary dictionary) {
- DCHECK(IsJSGlobalObject());
- set_raw_properties_or_hash(dictionary);
+DEF_GETTER(JSGlobalObject, global_dictionary, GlobalDictionary) {
+ DCHECK(!HasFastProperties(isolate));
+ DCHECK(IsJSGlobalObject(isolate));
+ return GlobalDictionary::cast(raw_properties_or_hash(isolate));
}
-GlobalDictionary JSGlobalObject::global_dictionary() {
- DCHECK(!HasFastProperties());
+void JSGlobalObject::set_global_dictionary(GlobalDictionary dictionary) {
DCHECK(IsJSGlobalObject());
- return GlobalDictionary::cast(raw_properties_or_hash());
+ set_raw_properties_or_hash(dictionary);
}
-NumberDictionary JSObject::element_dictionary() {
- DCHECK(HasDictionaryElements() || HasSlowStringWrapperElements());
- return NumberDictionary::cast(elements());
+DEF_GETTER(JSObject, element_dictionary, NumberDictionary) {
+ DCHECK(HasDictionaryElements(isolate) ||
+ HasSlowStringWrapperElements(isolate));
+ return NumberDictionary::cast(elements(isolate));
}
-void JSReceiver::initialize_properties() {
- ReadOnlyRoots roots = GetReadOnlyRoots();
+void JSReceiver::initialize_properties(Isolate* isolate) {
+ ReadOnlyRoots roots(isolate);
DCHECK(!ObjectInYoungGeneration(roots.empty_fixed_array()));
DCHECK(!ObjectInYoungGeneration(roots.empty_property_dictionary()));
- if (map().is_dictionary_map()) {
+ if (map(isolate).is_dictionary_map()) {
WRITE_FIELD(*this, kPropertiesOrHashOffset,
roots.empty_property_dictionary());
} else {
@@ -898,36 +907,36 @@ void JSReceiver::initialize_properties() {
}
}
-bool JSReceiver::HasFastProperties() const {
- DCHECK(raw_properties_or_hash().IsSmi() ||
- ((raw_properties_or_hash().IsGlobalDictionary() ||
- raw_properties_or_hash().IsNameDictionary()) ==
- map().is_dictionary_map()));
- return !map().is_dictionary_map();
+DEF_GETTER(JSReceiver, HasFastProperties, bool) {
+ DCHECK(raw_properties_or_hash(isolate).IsSmi() ||
+ ((raw_properties_or_hash(isolate).IsGlobalDictionary(isolate) ||
+ raw_properties_or_hash(isolate).IsNameDictionary(isolate)) ==
+ map(isolate).is_dictionary_map()));
+ return !map(isolate).is_dictionary_map();
}
-NameDictionary JSReceiver::property_dictionary() const {
- DCHECK(!IsJSGlobalObject());
- DCHECK(!HasFastProperties());
-
- Object prop = raw_properties_or_hash();
+DEF_GETTER(JSReceiver, property_dictionary, NameDictionary) {
+ DCHECK(!IsJSGlobalObject(isolate));
+ DCHECK(!HasFastProperties(isolate));
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ Object prop = raw_properties_or_hash(isolate);
if (prop.IsSmi()) {
- return GetReadOnlyRoots().empty_property_dictionary();
+ return GetReadOnlyRoots(isolate).empty_property_dictionary();
}
-
return NameDictionary::cast(prop);
}
// TODO(gsathya): Pass isolate directly to this function and access
// the heap from this.
-PropertyArray JSReceiver::property_array() const {
- DCHECK(HasFastProperties());
-
- Object prop = raw_properties_or_hash();
- if (prop.IsSmi() || prop == GetReadOnlyRoots().empty_fixed_array()) {
- return GetReadOnlyRoots().empty_property_array();
+DEF_GETTER(JSReceiver, property_array, PropertyArray) {
+ DCHECK(HasFastProperties(isolate));
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ Object prop = raw_properties_or_hash(isolate);
+ if (prop.IsSmi() || prop == GetReadOnlyRoots(isolate).empty_fixed_array()) {
+ return GetReadOnlyRoots(isolate).empty_property_array();
}
-
return PropertyArray::cast(prop);
}
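
For illustration only (not part of this patch), a sketch — with stand-in types, not the real V8 macro expansion — of the DEF_GETTER shape these conversions follow: each getter gains an explicit-Isolate overload, and the parameterless form derives the isolate once and forwards:

#include <cassert>

struct Isolate {
  bool dictionary_map = false;
};

struct FakeJSReceiver {
  // Stand-in for what GetIsolateForPtrCompr(*this) would return.
  Isolate* isolate_for_ptr_compr = nullptr;

  // Parameterless form: derive the isolate, then forward.
  bool HasFastProperties() const {
    return HasFastProperties(isolate_for_ptr_compr);
  }
  // Explicit form: callers that already hold the isolate pass it through,
  // avoiding repeated derivation on the pointer-compression path.
  bool HasFastProperties(Isolate* isolate) const {
    // Stand-in for !map(isolate).is_dictionary_map().
    return !isolate->dictionary_map;
  }
};

int main() {
  Isolate isolate;
  FakeJSReceiver receiver;
  receiver.isolate_for_ptr_compr = &isolate;
  assert(receiver.HasFastProperties());
  assert(receiver.HasFastProperties(&isolate));
  return 0;
}
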
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 317837a99f..5c4db16206 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -8,6 +8,7 @@
#include "src/codegen/compiler.h"
#include "src/date/date.h"
#include "src/execution/arguments.h"
+#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
#include "src/handles/maybe-handles.h"
@@ -194,15 +195,16 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
return Just(!source->IsString() || String::cast(*source).length() == 0);
}
+ Isolate* isolate = target->GetIsolate();
+
// If the target is deprecated, the object will be updated on first store. If
// the source for that store equals the target, this will invalidate the
// cached representation of the source. Preventively upgrade the target.
// Do this on each iteration since any property load could cause deprecation.
if (target->map().is_deprecated()) {
- JSObject::MigrateInstance(Handle<JSObject>::cast(target));
+ JSObject::MigrateInstance(isolate, Handle<JSObject>::cast(target));
}
- Isolate* isolate = target->GetIsolate();
Handle<Map> map(JSReceiver::cast(*source).map(), isolate);
if (!map->IsJSObjectMap()) return Just(false);
@@ -374,8 +376,8 @@ String JSReceiver::class_name() {
TYPED_ARRAYS(SWITCH_KIND)
#undef SWITCH_KIND
}
- if (IsJSValue()) {
- Object value = JSValue::cast(*this).value();
+ if (IsJSPrimitiveWrapper()) {
+ Object value = JSPrimitiveWrapper::cast(*this).value();
if (value.IsBoolean()) return roots.Boolean_string();
if (value.IsString()) return roots.String_string();
if (value.IsNumber()) return roots.Number_string();
@@ -1092,7 +1094,8 @@ Maybe<bool> SetPropertyWithInterceptorInternal(
Maybe<bool> DefinePropertyWithInterceptorInternal(
LookupIterator* it, Handle<InterceptorInfo> interceptor,
- Maybe<ShouldThrow> should_throw, PropertyDescriptor& desc) {
+ Maybe<ShouldThrow> should_throw,
+ PropertyDescriptor& desc) { // NOLINT(runtime/references)
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -1827,6 +1830,13 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
int number_of_own_elements =
object->GetElementsAccessor()->GetCapacity(*object, object->elements());
+
+ if (number_of_own_elements >
+ FixedArray::kMaxLength - number_of_own_descriptors) {
+ isolate->Throw(*isolate->factory()->NewRangeError(
+ MessageTemplate::kInvalidArrayLength));
+ return Nothing<bool>();
+ }
Handle<FixedArray> values_or_entries = isolate->factory()->NewFixedArray(
number_of_own_descriptors + number_of_own_elements);
int count = 0;
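
For illustration only (not part of this patch), a sketch of the guard added above: checking the element count against FixedArray::kMaxLength minus the descriptor count rejects oversized results before allocation and, unlike summing the two counts first, cannot overflow (the kMaxLength value below is a stand-in):

#include <cassert>
#include <limits>

constexpr int kMaxLength = 1 << 27;  // stand-in for FixedArray::kMaxLength

bool FitsInFixedArray(int number_of_own_descriptors,
                      int number_of_own_elements) {
  // Same condition as "descriptors + elements <= kMaxLength", but written so
  // that no intermediate value can overflow int.
  return number_of_own_elements <= kMaxLength - number_of_own_descriptors;
}

int main() {
  assert(FitsInFixedArray(16, 1000));
  assert(!FitsInFixedArray(1, kMaxLength));
  // A naive "descriptors + elements > kMaxLength" check could wrap around here.
  assert(!FitsInFixedArray(2, std::numeric_limits<int>::max() - 1));
  return 0;
}
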
@@ -1918,7 +1928,8 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
int length = 0;
for (int i = 0; i < keys->length(); ++i) {
- Handle<Name> key = Handle<Name>::cast(handle(keys->get(i), isolate));
+ Handle<Name> key =
+ Handle<Name>::cast(handle(keys->get(isolate, i), isolate));
if (filter & ONLY_ENUMERABLE) {
PropertyDescriptor descriptor;
@@ -2002,13 +2013,9 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, initial_map,
JSFunction::GetDerivedMap(isolate, constructor, new_target), JSObject);
- Handle<JSObject> result = isolate->factory()->NewJSObjectFromMap(
- initial_map, AllocationType::kYoung, site);
- if (initial_map->is_dictionary_map()) {
- Handle<NameDictionary> dictionary =
- NameDictionary::New(isolate, NameDictionary::kInitialCapacity);
- result->SetProperties(*dictionary);
- }
+ Handle<JSObject> result = isolate->factory()->NewFastOrSlowJSObjectFromMap(
+ initial_map, NameDictionary::kInitialCapacity, AllocationType::kYoung,
+ site);
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
return result;
@@ -2026,13 +2033,7 @@ MaybeHandle<JSObject> JSObject::ObjectCreate(Isolate* isolate,
Map::GetObjectCreateMap(isolate, Handle<HeapObject>::cast(prototype));
// Actually allocate the object.
- Handle<JSObject> object;
- if (map->is_dictionary_map()) {
- object = isolate->factory()->NewSlowJSObjectFromMap(map);
- } else {
- object = isolate->factory()->NewJSObjectFromMap(map);
- }
- return object;
+ return isolate->factory()->NewFastOrSlowJSObjectFromMap(map);
}
void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
@@ -2072,8 +2073,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSBoundFunction::kSize;
case JS_FUNCTION_TYPE:
return JSFunction::GetHeaderSize(function_has_prototype_slot);
- case JS_VALUE_TYPE:
- return JSValue::kSize;
+ case JS_PRIMITIVE_WRAPPER_TYPE:
+ return JSPrimitiveWrapper::kSize;
case JS_DATE_TYPE:
return JSDate::kSize;
case JS_ARRAY_TYPE:
@@ -2423,7 +2424,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
// All other JSObjects are rather similar to each other (JSObject,
- // JSGlobalProxy, JSGlobalObject, JSUndetectable, JSValue).
+ // JSGlobalProxy, JSGlobalObject, JSUndetectable, JSPrimitiveWrapper).
default: {
Map map_of_this = map();
Heap* heap = GetHeap();
@@ -2457,9 +2458,9 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JS%sObject", global_object ? "Global " : "");
}
}
- if (IsJSValue()) {
+ if (IsJSPrimitiveWrapper()) {
accumulator->Add(" value = ");
- JSValue::cast(*this).value().ShortPrint(accumulator);
+ JSPrimitiveWrapper::cast(*this).value().ShortPrint(accumulator);
}
accumulator->Put('>');
break;
@@ -2595,11 +2596,11 @@ namespace {
// to temporarily store the inobject properties.
// * If there are properties left in the backing store, install the backing
// store.
-void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
- Isolate* isolate = object->GetIsolate();
+void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
+ Handle<Map> new_map) {
Handle<Map> old_map(object->map(), isolate);
// In case of a regular transition.
- if (new_map->GetBackPointer() == *old_map) {
+ if (new_map->GetBackPointer(isolate) == *old_map) {
// If the map does not add named properties, simply set the map.
if (old_map->NumberOfOwnDescriptors() ==
new_map->NumberOfOwnDescriptors()) {
@@ -2608,7 +2609,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
// If the map adds a new kDescriptor property, simply set the map.
- PropertyDetails details = new_map->GetLastDescriptorDetails();
+ PropertyDetails details = new_map->GetLastDescriptorDetails(isolate);
if (details.location() == kDescriptor) {
object->synchronized_set_map(*new_map);
return;
@@ -2618,14 +2619,14 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// can also simply set the map (modulo a special case for mutable
// double boxes).
FieldIndex index =
- FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
- if (index.is_inobject() ||
- index.outobject_array_index() < object->property_array().length()) {
+ FieldIndex::ForDescriptor(isolate, *new_map, new_map->LastAdded());
+ if (index.is_inobject() || index.outobject_array_index() <
+ object->property_array(isolate).length()) {
// We still need to allocate MutableHeapNumbers for double fields
// if either double field unboxing is disabled or the double field
// is in the PropertyArray backing store (where we don't support
// double field unboxing).
- if (index.is_double() && !new_map->IsUnboxedDoubleField(index)) {
+ if (index.is_double() && !new_map->IsUnboxedDoubleField(isolate, index)) {
auto value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
object->RawFastPropertyAtPut(index, *value);
}
@@ -2636,7 +2637,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// This migration is a transition from a map that has run out of property
// space. Extend the backing store.
int grow_by = new_map->UnusedPropertyFields() + 1;
- Handle<PropertyArray> old_storage(object->property_array(), isolate);
+ Handle<PropertyArray> old_storage(object->property_array(isolate), isolate);
Handle<PropertyArray> new_storage =
isolate->factory()->CopyPropertyArrayAndGrow(old_storage, grow_by);
@@ -2682,10 +2683,10 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Handle<FixedArray> inobject_props =
isolate->factory()->NewFixedArray(inobject);
- Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors(),
- isolate);
- Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> old_descriptors(
+ old_map->instance_descriptors(isolate), isolate);
+ Handle<DescriptorArray> new_descriptors(
+ new_map->instance_descriptors(isolate), isolate);
int old_nof = old_map->NumberOfOwnDescriptors();
int new_nof = new_map->NumberOfOwnDescriptors();
@@ -2713,13 +2714,13 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
} else {
DCHECK_EQ(kData, old_details.kind());
- value = handle(old_descriptors->GetStrongValue(i), isolate);
+ value = handle(old_descriptors->GetStrongValue(isolate, i), isolate);
DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
}
} else {
DCHECK_EQ(kField, old_details.location());
- FieldIndex index = FieldIndex::ForDescriptor(*old_map, i);
- if (object->IsUnboxedDoubleField(index)) {
+ FieldIndex index = FieldIndex::ForDescriptor(isolate, *old_map, i);
+ if (object->IsUnboxedDoubleField(isolate, index)) {
uint64_t old_bits = object->RawFastDoublePropertyAsBitsAt(index);
if (representation.IsDouble()) {
value = isolate->factory()->NewMutableHeapNumberFromBits(old_bits);
@@ -2727,7 +2728,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
value = isolate->factory()->NewHeapNumberFromBits(old_bits);
}
} else {
- value = handle(object->RawFastPropertyAt(index), isolate);
+ value = handle(object->RawFastPropertyAt(isolate, index), isolate);
if (!old_representation.IsDouble() && representation.IsDouble()) {
DCHECK_IMPLIES(old_representation.IsNone(),
value->IsUninitialized(isolate));
@@ -2779,11 +2780,11 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
int limit = Min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
- Object value = inobject_props->get(i);
+ Object value = inobject_props->get(isolate, i);
// Can't use JSObject::FastPropertyAtPut() because proper map was not set
// yet.
- if (new_map->IsUnboxedDoubleField(index)) {
- DCHECK(value.IsMutableHeapNumber());
+ if (new_map->IsUnboxedDoubleField(isolate, index)) {
+ DCHECK(value.IsMutableHeapNumber(isolate));
// Ensure that all bits of the double value are preserved.
object->RawFastDoublePropertyAsBitsAtPut(
index, MutableHeapNumber::cast(value).value_as_bits());
@@ -2818,19 +2819,19 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
object->synchronized_set_map(*new_map);
}
-void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
+void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
+ Handle<Map> new_map,
int expected_additional_properties) {
// The global object is always normalized.
- DCHECK(!object->IsJSGlobalObject());
+ DCHECK(!object->IsJSGlobalObject(isolate));
// JSGlobalProxy must never be normalized
- DCHECK(!object->IsJSGlobalProxy());
+ DCHECK(!object->IsJSGlobalProxy(isolate));
DCHECK_IMPLIES(new_map->is_prototype_map(),
Map::IsPrototypeChainInvalidated(*new_map));
- Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
- Handle<Map> map(object->map(), isolate);
+ Handle<Map> map(object->map(isolate), isolate);
// Allocate new content.
int real_size = map->NumberOfOwnDescriptors();
@@ -2844,33 +2845,33 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
Handle<NameDictionary> dictionary =
NameDictionary::New(isolate, property_count);
- Handle<DescriptorArray> descs(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descs(map->instance_descriptors(isolate), isolate);
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
- Handle<Name> key(descs->GetKey(i), isolate);
+ Handle<Name> key(descs->GetKey(isolate, i), isolate);
Handle<Object> value;
if (details.location() == kField) {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ FieldIndex index = FieldIndex::ForDescriptor(isolate, *map, i);
if (details.kind() == kData) {
- if (object->IsUnboxedDoubleField(index)) {
+ if (object->IsUnboxedDoubleField(isolate, index)) {
double old_value = object->RawFastDoublePropertyAt(index);
value = isolate->factory()->NewHeapNumber(old_value);
} else {
- value = handle(object->RawFastPropertyAt(index), isolate);
+ value = handle(object->RawFastPropertyAt(isolate, index), isolate);
if (details.representation().IsDouble()) {
- DCHECK(value->IsMutableHeapNumber());
+ DCHECK(value->IsMutableHeapNumber(isolate));
double old_value = Handle<MutableHeapNumber>::cast(value)->value();
value = isolate->factory()->NewHeapNumber(old_value);
}
}
} else {
DCHECK_EQ(kAccessor, details.kind());
- value = handle(object->RawFastPropertyAt(index), isolate);
+ value = handle(object->RawFastPropertyAt(isolate, index), isolate);
}
} else {
DCHECK_EQ(kDescriptor, details.location());
- value = handle(descs->GetStrongValue(i), isolate);
+ value = handle(descs->GetStrongValue(isolate, i), isolate);
}
DCHECK(!value.is_null());
PropertyDetails d(details.kind(), details.attributes(),
@@ -2932,11 +2933,12 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
} // namespace
-void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
+void JSObject::MigrateToMap(Isolate* isolate, Handle<JSObject> object,
+ Handle<Map> new_map,
int expected_additional_properties) {
- if (object->map() == *new_map) return;
- Handle<Map> old_map(object->map(), object->GetIsolate());
- NotifyMapChange(old_map, new_map, object->GetIsolate());
+ if (object->map(isolate) == *new_map) return;
+ Handle<Map> old_map(object->map(isolate), isolate);
+ NotifyMapChange(old_map, new_map, isolate);
if (old_map->is_dictionary_map()) {
// For slow-to-fast migrations JSObject::MigrateSlowToFast()
@@ -2946,7 +2948,7 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
// Slow-to-slow migration is trivial.
object->synchronized_set_map(*new_map);
} else if (!new_map->is_dictionary_map()) {
- MigrateFastToFast(object, new_map);
+ MigrateFastToFast(isolate, object, new_map);
if (old_map->is_prototype_map()) {
DCHECK(!old_map->is_stable());
DCHECK(new_map->is_stable());
@@ -2958,13 +2960,12 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
old_map->set_owns_descriptors(false);
DCHECK(old_map->is_abandoned_prototype_map());
// Ensure that no transition was inserted for prototype migrations.
- DCHECK_EQ(0, TransitionsAccessor(object->GetIsolate(), old_map)
- .NumberOfTransitions());
- DCHECK(new_map->GetBackPointer().IsUndefined());
- DCHECK(object->map() != *old_map);
+ DCHECK_EQ(0, TransitionsAccessor(isolate, old_map).NumberOfTransitions());
+ DCHECK(new_map->GetBackPointer(isolate).IsUndefined(isolate));
+ DCHECK(object->map(isolate) != *old_map);
}
} else {
- MigrateFastToSlow(object, new_map, expected_additional_properties);
+ MigrateFastToSlow(isolate, object, new_map, expected_additional_properties);
}
// Careful: Don't allocate here!
@@ -2978,11 +2979,11 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
void JSObject::ForceSetPrototype(Handle<JSObject> object,
Handle<HeapObject> proto) {
// object.__proto__ = proto;
- Handle<Map> old_map = Handle<Map>(object->map(), object->GetIsolate());
- Handle<Map> new_map =
- Map::Copy(object->GetIsolate(), old_map, "ForceSetPrototype");
- Map::SetPrototype(object->GetIsolate(), new_map, proto);
- JSObject::MigrateToMap(object, new_map);
+ Isolate* isolate = object->GetIsolate();
+ Handle<Map> old_map = Handle<Map>(object->map(), isolate);
+ Handle<Map> new_map = Map::Copy(isolate, old_map, "ForceSetPrototype");
+ Map::SetPrototype(isolate, new_map, proto);
+ JSObject::MigrateToMap(isolate, object, new_map);
}
Maybe<bool> JSObject::SetPropertyWithInterceptor(
@@ -3068,31 +3069,30 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
object->synchronized_set_map(*map);
}
-void JSObject::MigrateInstance(Handle<JSObject> object) {
- Handle<Map> original_map(object->map(), object->GetIsolate());
- Handle<Map> map = Map::Update(object->GetIsolate(), original_map);
+void JSObject::MigrateInstance(Isolate* isolate, Handle<JSObject> object) {
+ Handle<Map> original_map(object->map(), isolate);
+ Handle<Map> map = Map::Update(isolate, original_map);
map->set_is_migration_target(true);
- MigrateToMap(object, map);
+ JSObject::MigrateToMap(isolate, object, map);
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, *map);
}
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- object->JSObjectVerify(object->GetIsolate());
+ object->JSObjectVerify(isolate);
}
#endif
}
// static
-bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
- Isolate* isolate = object->GetIsolate();
+bool JSObject::TryMigrateInstance(Isolate* isolate, Handle<JSObject> object) {
DisallowDeoptimization no_deoptimization(isolate);
Handle<Map> original_map(object->map(), isolate);
Handle<Map> new_map;
if (!Map::TryUpdate(isolate, original_map).ToHandle(&new_map)) {
return false;
}
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
if (FLAG_trace_migration && *original_map != object->map()) {
object->PrintInstanceMigration(stdout, *original_map, object->map());
}
@@ -3263,16 +3263,18 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
return GetPropertyAttributesWithInterceptorInternal(it, it->GetInterceptor());
}
-void JSObject::NormalizeProperties(Handle<JSObject> object,
+void JSObject::NormalizeProperties(Isolate* isolate, Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties,
const char* reason) {
if (!object->HasFastProperties()) return;
- Handle<Map> map(object->map(), object->GetIsolate());
- Handle<Map> new_map = Map::Normalize(object->GetIsolate(), map, mode, reason);
+ Handle<Map> map(object->map(), isolate);
+ Handle<Map> new_map =
+ Map::Normalize(isolate, map, map->elements_kind(), mode, reason);
- MigrateToMap(object, new_map, expected_additional_properties);
+ JSObject::MigrateToMap(isolate, object, new_map,
+ expected_additional_properties);
}
void JSObject::MigrateSlowToFast(Handle<JSObject> object,
@@ -3475,7 +3477,7 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
: DICTIONARY_ELEMENTS;
Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, target_kind);
// Set the new map first to satisfy the elements type assert in set_elements().
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
if (is_sloppy_arguments) {
SloppyArgumentsElements::cast(object->elements())
@@ -3710,7 +3712,7 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
Map::Copy(isolate, handle(object->map(), isolate), "PreventExtensions");
new_map->set_is_extensible(false);
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
DCHECK(!object->map().is_extensible());
return Just(true);
@@ -3756,6 +3758,21 @@ template void JSObject::ApplyAttributesToDictionary(
Isolate* isolate, ReadOnlyRoots roots, Handle<NumberDictionary> dictionary,
const PropertyAttributes attributes);
+Handle<NumberDictionary> CreateElementDictionary(Isolate* isolate,
+ Handle<JSObject> object) {
+ Handle<NumberDictionary> new_element_dictionary;
+ if (!object->HasTypedArrayElements() && !object->HasDictionaryElements() &&
+ !object->HasSlowStringWrapperElements()) {
+ int length = object->IsJSArray()
+ ? Smi::ToInt(Handle<JSArray>::cast(object)->length())
+ : object->elements().length();
+ new_element_dictionary =
+ length == 0 ? isolate->factory()->empty_slow_element_dictionary()
+ : object->GetElementsAccessor()->Normalize(object);
+ }
+ return new_element_dictionary;
+}
+
template <PropertyAttributes attrs>
Maybe<bool> JSObject::PreventExtensionsWithTransition(
Handle<JSObject> object, ShouldThrow should_throw) {
@@ -3776,10 +3793,12 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
if (attrs == NONE && !object->map().is_extensible()) return Just(true);
- ElementsKind old_elements_kind = object->map().elements_kind();
- if (attrs != FROZEN && IsSealedElementsKind(old_elements_kind))
- return Just(true);
- if (old_elements_kind == PACKED_FROZEN_ELEMENTS) return Just(true);
+ {
+ ElementsKind old_elements_kind = object->map().elements_kind();
+ if (attrs != FROZEN && IsSealedElementsKind(old_elements_kind))
+ return Just(true);
+ if (old_elements_kind == PACKED_FROZEN_ELEMENTS) return Just(true);
+ }
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
@@ -3808,17 +3827,6 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
RETURN_FAILURE(isolate, should_throw, NewTypeError(message));
}
- Handle<NumberDictionary> new_element_dictionary;
- if (!object->HasTypedArrayElements() && !object->HasDictionaryElements() &&
- !object->HasSlowStringWrapperElements()) {
- int length = object->IsJSArray()
- ? Smi::ToInt(Handle<JSArray>::cast(object)->length())
- : object->elements().length();
- new_element_dictionary =
- length == 0 ? isolate->factory()->empty_slow_element_dictionary()
- : object->GetElementsAccessor()->Normalize(object);
- }
-
Handle<Symbol> transition_marker;
if (attrs == NONE) {
transition_marker = isolate->factory()->nonextensible_symbol();
@@ -3829,6 +3837,31 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
transition_marker = isolate->factory()->frozen_symbol();
}
+ // Currently, there are only sealed/frozen Object element kinds, and
+ // Map::MigrateToMap doesn't handle reconfiguring property attributes and
+ // changing the elements kind in one go. If we seal or freeze an object with
+ // a Smi or Double elements kind, we transition to the corresponding Object
+ // elements kind first to keep element access valid.
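+ // For example, sealing a JSArray that currently has PACKED_SMI_ELEMENTS
+ // (say, [1, 2, 3]) is handled below by first transitioning it to
+ // PACKED_ELEMENTS; only then is the sealed elements kind installed via the
+ // map transition.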
+ if (FLAG_enable_sealed_frozen_elements_kind &&
+ (attrs == SEALED || attrs == FROZEN)) {
+ switch (object->map().elements_kind()) {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ JSObject::TransitionElementsKind(object, PACKED_ELEMENTS);
+ break;
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ JSObject::TransitionElementsKind(object, HOLEY_ELEMENTS);
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Make sure we only use this element dictionary in case we can't transition
+ // to a sealed or frozen elements kind.
+ Handle<NumberDictionary> new_element_dictionary;
+
Handle<Map> old_map(object->map(), isolate);
old_map = Map::Update(isolate, old_map);
TransitionsAccessor transitions(isolate, old_map);
@@ -3840,16 +3873,22 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS ||
transition_map->has_frozen_or_sealed_elements());
DCHECK(!transition_map->is_extensible());
- JSObject::MigrateToMap(object, transition_map);
+ if (!transition_map->has_frozen_or_sealed_elements()) {
+ new_element_dictionary = CreateElementDictionary(isolate, object);
+ }
+ JSObject::MigrateToMap(isolate, object, transition_map);
} else if (transitions.CanHaveMoreTransitions()) {
// Create a new descriptor array with the appropriate property attributes
Handle<Map> new_map = Map::CopyForPreventExtensions(
isolate, old_map, attrs, transition_marker, "CopyForPreventExtensions");
- JSObject::MigrateToMap(object, new_map);
+ if (!new_map->has_frozen_or_sealed_elements()) {
+ new_element_dictionary = CreateElementDictionary(isolate, object);
+ }
+ JSObject::MigrateToMap(isolate, object, new_map);
} else {
DCHECK(old_map->is_dictionary_map() || !old_map->is_prototype_map());
// Slow path: need to normalize properties for safety
- NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0,
+ NormalizeProperties(isolate, object, CLEAR_INOBJECT_PROPERTIES, 0,
"SlowPreventExtensions");
// Create a new map, since other objects with this map may be extensible.
@@ -3857,6 +3896,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
Handle<Map> new_map = Map::Copy(isolate, handle(object->map(), isolate),
"SlowCopyForPreventExtensions");
new_map->set_is_extensible(false);
+ new_element_dictionary = CreateElementDictionary(isolate, object);
if (!new_element_dictionary.is_null()) {
ElementsKind new_kind =
IsStringWrapperElementsKind(old_map->elements_kind())
@@ -3864,7 +3904,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
: DICTIONARY_ELEMENTS;
new_map->set_elements_kind(new_kind);
}
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
if (attrs != NONE) {
ReadOnlyRoots roots(isolate);
@@ -3883,6 +3923,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
if (object->map().has_frozen_or_sealed_elements()) {
+ DCHECK(new_element_dictionary.is_null());
return Just(true);
}
@@ -3990,7 +4031,7 @@ bool JSObject::HasEnumerableElements() {
return true;
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
- if (String::cast(JSValue::cast(object).value()).length() > 0) {
+ if (String::cast(JSPrimitiveWrapper::cast(object).value()).length() > 0) {
return true;
}
return object.elements().length() > 0;
@@ -4177,10 +4218,11 @@ static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
// static
void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
bool enable_setup_mode) {
+ Isolate* isolate = object->GetIsolate();
if (object->IsJSGlobalObject()) return;
if (enable_setup_mode && PrototypeBenefitsFromNormalization(object)) {
// First normalize to ensure all JSFunctions are DATA_CONSTANT.
- JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
+ JSObject::NormalizeProperties(isolate, object, KEEP_INOBJECT_PROPERTIES, 0,
"NormalizeAsPrototype");
}
if (object->map().is_prototype_map()) {
@@ -4189,10 +4231,9 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
}
} else {
- Handle<Map> new_map = Map::Copy(object->GetIsolate(),
- handle(object->map(), object->GetIsolate()),
- "CopyAsPrototype");
- JSObject::MigrateToMap(object, new_map);
+ Handle<Map> new_map =
+ Map::Copy(isolate, handle(object->map(), isolate), "CopyAsPrototype");
+ JSObject::MigrateToMap(isolate, object, new_map);
object->map().set_is_prototype_map(true);
// Replace the pointer to the exact constructor with the Object function
@@ -4451,7 +4492,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
Handle<Map> new_map =
Map::TransitionToPrototype(isolate, map, Handle<HeapObject>::cast(value));
DCHECK(new_map->prototype() == *value);
- JSObject::MigrateToMap(real_receiver, new_map);
+ JSObject::MigrateToMap(isolate, real_receiver, new_map);
DCHECK(size == object->Size());
return Just(true);
@@ -4481,10 +4522,6 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
object, args->slot_at(first_arg + arg_count - 1), arg_count, mode);
}
-ElementsAccessor* JSObject::GetElementsAccessor() {
- return ElementsAccessor::ForKind(GetElementsKind());
-}
-
void JSObject::ValidateElements(JSObject object) {
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
@@ -4560,22 +4597,22 @@ static ElementsKind BestFittingFastElementsKind(JSObject object) {
void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
Handle<Object> value,
PropertyAttributes attributes) {
- DCHECK(object->map().is_extensible());
-
Isolate* isolate = object->GetIsolate();
+ DCHECK(object->map(isolate).is_extensible());
+
uint32_t old_length = 0;
uint32_t new_capacity = 0;
- if (object->IsJSArray()) {
+ if (object->IsJSArray(isolate)) {
CHECK(JSArray::cast(*object).length().ToArrayLength(&old_length));
}
- ElementsKind kind = object->GetElementsKind();
- FixedArrayBase elements = object->elements();
+ ElementsKind kind = object->GetElementsKind(isolate);
+ FixedArrayBase elements = object->elements(isolate);
ElementsKind dictionary_kind = DICTIONARY_ELEMENTS;
if (IsSloppyArgumentsElementsKind(kind)) {
- elements = SloppyArgumentsElements::cast(elements).arguments();
+ elements = SloppyArgumentsElements::cast(elements).arguments(isolate);
dictionary_kind = SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
} else if (IsStringWrapperElementsKind(kind)) {
dictionary_kind = SLOW_STRING_WRAPPER_ELEMENTS;
@@ -4583,7 +4620,7 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
if (attributes != NONE) {
kind = dictionary_kind;
- } else if (elements.IsNumberDictionary()) {
+ } else if (elements.IsNumberDictionary(isolate)) {
kind = ShouldConvertToFastElements(
*object, NumberDictionary::cast(elements), index, &new_capacity)
? BestFittingFastElementsKind(*object)
@@ -4594,8 +4631,9 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
kind = dictionary_kind;
}
- ElementsKind to = value->OptimalElementsKind();
- if (IsHoleyElementsKind(kind) || !object->IsJSArray() || index > old_length) {
+ ElementsKind to = value->OptimalElementsKind(isolate);
+ if (IsHoleyElementsKind(kind) || !object->IsJSArray(isolate) ||
+ index > old_length) {
to = GetHoleyElementsKind(to);
kind = GetHoleyElementsKind(kind);
}
@@ -4603,7 +4641,7 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
ElementsAccessor* accessor = ElementsAccessor::ForKind(to);
accessor->Add(object, index, value, attributes, new_capacity);
- if (object->IsJSArray() && index >= old_length) {
+ if (object->IsJSArray(isolate) && index >= old_length) {
Handle<Object> new_length =
isolate->factory()->NewNumberFromUint(index + 1);
JSArray::cast(*object).set_length(*new_length);
@@ -4658,14 +4696,15 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
UpdateAllocationSite(object, to_kind);
- if (object->elements() == object->GetReadOnlyRoots().empty_fixed_array() ||
+ Isolate* isolate = object->GetIsolate();
+ if (object->elements() == ReadOnlyRoots(isolate).empty_fixed_array() ||
IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
// No change is needed to the elements() buffer, the transition
// only requires a map change.
Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
- MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
if (FLAG_trace_elements_transitions) {
- Handle<FixedArrayBase> elms(object->elements(), object->GetIsolate());
+ Handle<FixedArrayBase> elms(object->elements(), isolate);
PrintElementsTransition(stdout, object, from_kind, elms, to_kind, elms);
}
} else {
@@ -4946,6 +4985,17 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
// static
void JSFunction::InitializeFeedbackCell(Handle<JSFunction> function) {
Isolate* const isolate = function->GetIsolate();
+
+ if (function->has_feedback_vector()) {
+ // TODO(984344): Make this a CHECK that feedback vectors are identical to
+ // what we expect once we have removed all bytecode generation differences
+ // between eager and lazy compilation. For now just reset if they aren't
+ // identical.
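+ // (Comparing the vector length with the metadata's slot count is a cheap
+ // proxy for "identical" here.)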
+ FeedbackVector vector = function->feedback_vector();
+ if (vector.length() == vector.metadata().slot_count()) return;
+ function->raw_feedback_cell().reset();
+ }
+
bool needs_feedback_vector = !FLAG_lazy_feedback_allocation;
// We need feedback vector for certain log events, collecting type profile
// and more precise code coverage.
@@ -4995,7 +5045,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
native_context->get(Context::ARRAY_FUNCTION_INDEX), isolate);
if (array_function->IsJSFunction() &&
*function == JSFunction::cast(*array_function)) {
- CacheInitialJSArrayMaps(native_context, new_map);
+ CacheInitialJSArrayMaps(isolate, native_context, new_map);
}
}
@@ -5034,7 +5084,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
Handle<Map> new_map =
Map::Copy(isolate, handle(function->map(), isolate), "SetPrototype");
- JSObject::MigrateToMap(function, new_map);
+ JSObject::MigrateToMap(isolate, function, new_map);
new_map->SetConstructor(*value);
new_map->set_has_non_instance_prototype(true);
@@ -5145,14 +5195,16 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_MESSAGE_OBJECT_TYPE:
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
+ case JS_FINALIZATION_GROUP_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_PROMISE_TYPE:
case JS_REGEXP_TYPE:
case JS_SET_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_TYPED_ARRAY_TYPE:
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
case WASM_GLOBAL_TYPE:
case WASM_INSTANCE_TYPE:
@@ -5530,7 +5582,7 @@ void JSFunction::ClearTypeFeedbackInfo() {
FeedbackVector vector = feedback_vector();
Isolate* isolate = GetIsolate();
if (vector.ClearSlots(isolate)) {
- IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(), *this,
+ IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(),
"ClearTypeFeedbackInfo");
}
}
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 5ac1751c48..bcea3a28df 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -8,6 +8,7 @@
#include "src/objects/embedder-data-slot.h"
#include "src/objects/objects.h"
#include "src/objects/property-array.h"
+#include "torque-generated/class-definitions-tq.h"
#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
@@ -27,16 +28,16 @@ class JSReceiver : public HeapObject {
public:
NEVER_READ_ONLY_SPACE
// Returns true if there is no slow (ie, dictionary) backing store.
- inline bool HasFastProperties() const;
+ DECL_GETTER(HasFastProperties, bool)
// Returns the properties array backing store if it
// exists. Otherwise, returns an empty_property_array when there's a
// Smi (hash code) or an empty_fixed_array for a fast properties
// map.
- inline PropertyArray property_array() const;
+ DECL_GETTER(property_array, PropertyArray)
// Gets slow properties for non-global objects.
- inline NameDictionary property_dictionary() const;
+ DECL_GETTER(property_dictionary, NameDictionary)
// Sets the properties backing store and makes sure any existing hash is moved
// to the new properties store. To clear out the properties store, pass in the
@@ -62,12 +63,13 @@ class JSReceiver : public HeapObject {
// above typed getters and setters to access the properties.
DECL_ACCESSORS(raw_properties_or_hash, Object)
- inline void initialize_properties();
+ inline void initialize_properties(Isolate* isolate);
// Deletes an existing named property in a normalized object.
static void DeleteNormalizedProperty(Handle<JSReceiver> object, int entry);
DECL_CAST(JSReceiver)
+ DECL_VERIFIER(JSReceiver)
// ES6 section 7.1.1 ToPrimitive
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ToPrimitive(
@@ -275,7 +277,7 @@ class JSReceiver : public HeapObject {
// properties.
// Note that the map of JSObject changes during execution to enable inline
// caching.
-class JSObject : public JSReceiver {
+class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
public:
static bool IsUnmodifiedApiObject(FullObjectSlot o);
@@ -290,78 +292,63 @@ class JSObject : public JSReceiver {
static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> ObjectCreate(
Isolate* isolate, Handle<Object> prototype);
- // [elements]: The elements (properties with names that are integers).
- //
- // Elements can be in two general modes: fast and slow. Each mode
- // corresponds to a set of object representations of elements that
- // have something in common.
- //
- // In the fast mode elements is a FixedArray and so each element can be
- // quickly accessed. The elements array can have one of several maps in this
- // mode: fixed_array_map, fixed_double_array_map,
- // sloppy_arguments_elements_map or fixed_cow_array_map (for copy-on-write
- // arrays). In the latter case the elements array may be shared by a few
- // objects and so before writing to any element the array must be copied. Use
- // EnsureWritableFastElements in this case.
- //
- // In the slow mode the elements is either a NumberDictionary or a
- // FixedArray parameter map for a (sloppy) arguments object.
- DECL_ACCESSORS(elements, FixedArrayBase)
inline void initialize_elements();
static inline void SetMapAndElements(Handle<JSObject> object, Handle<Map> map,
Handle<FixedArrayBase> elements);
- inline ElementsKind GetElementsKind() const;
- V8_EXPORT_PRIVATE ElementsAccessor* GetElementsAccessor();
+ DECL_GETTER(GetElementsKind, ElementsKind)
+ DECL_GETTER(GetElementsAccessor, ElementsAccessor*)
+
// Returns true if an object has elements of PACKED_SMI_ELEMENTS or
// HOLEY_SMI_ELEMENTS ElementsKind.
- inline bool HasSmiElements();
+ DECL_GETTER(HasSmiElements, bool)
// Returns true if an object has elements of PACKED_ELEMENTS or
// HOLEY_ELEMENTS ElementsKind.
- inline bool HasObjectElements();
+ DECL_GETTER(HasObjectElements, bool)
// Returns true if an object has elements of PACKED_SMI_ELEMENTS,
// HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, or HOLEY_ELEMENTS.
- inline bool HasSmiOrObjectElements();
+ DECL_GETTER(HasSmiOrObjectElements, bool)
// Returns true if an object has any of the "fast" elements kinds.
- inline bool HasFastElements();
+ DECL_GETTER(HasFastElements, bool)
// Returns true if an object has any of the PACKED elements kinds.
- inline bool HasFastPackedElements();
+ DECL_GETTER(HasFastPackedElements, bool)
// Returns true if an object has elements of PACKED_DOUBLE_ELEMENTS or
// HOLEY_DOUBLE_ELEMENTS ElementsKind.
- inline bool HasDoubleElements();
+ DECL_GETTER(HasDoubleElements, bool)
// Returns true if an object has elements of HOLEY_SMI_ELEMENTS,
// HOLEY_DOUBLE_ELEMENTS, or HOLEY_ELEMENTS ElementsKind.
- inline bool HasHoleyElements();
- inline bool HasSloppyArgumentsElements();
- inline bool HasStringWrapperElements();
- inline bool HasDictionaryElements();
+ DECL_GETTER(HasHoleyElements, bool)
+ DECL_GETTER(HasSloppyArgumentsElements, bool)
+ DECL_GETTER(HasStringWrapperElements, bool)
+ DECL_GETTER(HasDictionaryElements, bool)
// Returns true if an object has elements of PACKED_ELEMENTS
- inline bool HasPackedElements();
- inline bool HasFrozenOrSealedElements();
- inline bool HasSealedElements();
-
- inline bool HasTypedArrayElements();
-
- inline bool HasFixedUint8ClampedElements();
- inline bool HasFixedArrayElements();
- inline bool HasFixedInt8Elements();
- inline bool HasFixedUint8Elements();
- inline bool HasFixedInt16Elements();
- inline bool HasFixedUint16Elements();
- inline bool HasFixedInt32Elements();
- inline bool HasFixedUint32Elements();
- inline bool HasFixedFloat32Elements();
- inline bool HasFixedFloat64Elements();
- inline bool HasFixedBigInt64Elements();
- inline bool HasFixedBigUint64Elements();
-
- inline bool HasFastArgumentsElements();
- inline bool HasSlowArgumentsElements();
- inline bool HasFastStringWrapperElements();
- inline bool HasSlowStringWrapperElements();
+ DECL_GETTER(HasPackedElements, bool)
+ DECL_GETTER(HasFrozenOrSealedElements, bool)
+ DECL_GETTER(HasSealedElements, bool)
+
+ DECL_GETTER(HasTypedArrayElements, bool)
+
+ DECL_GETTER(HasFixedUint8ClampedElements, bool)
+ DECL_GETTER(HasFixedArrayElements, bool)
+ DECL_GETTER(HasFixedInt8Elements, bool)
+ DECL_GETTER(HasFixedUint8Elements, bool)
+ DECL_GETTER(HasFixedInt16Elements, bool)
+ DECL_GETTER(HasFixedUint16Elements, bool)
+ DECL_GETTER(HasFixedInt32Elements, bool)
+ DECL_GETTER(HasFixedUint32Elements, bool)
+ DECL_GETTER(HasFixedFloat32Elements, bool)
+ DECL_GETTER(HasFixedFloat64Elements, bool)
+ DECL_GETTER(HasFixedBigInt64Elements, bool)
+ DECL_GETTER(HasFixedBigUint64Elements, bool)
+
+ DECL_GETTER(HasFastArgumentsElements, bool)
+ DECL_GETTER(HasSlowArgumentsElements, bool)
+ DECL_GETTER(HasFastStringWrapperElements, bool)
+ DECL_GETTER(HasSlowStringWrapperElements, bool)
bool HasEnumerableElements();
- inline NumberDictionary element_dictionary(); // Gets slow elements.
+ // Gets slow elements.
+ DECL_GETTER(element_dictionary, NumberDictionary)
// Requires: HasFastElements().
static void EnsureWritableFastElements(Handle<JSObject> object);
@@ -431,11 +418,11 @@ class JSObject : public JSReceiver {
// Migrates the given object to a map whose field representations are the
// lowest upper bound of all known representations for that field.
- static void MigrateInstance(Handle<JSObject> instance);
+ static void MigrateInstance(Isolate* isolate, Handle<JSObject> instance);
// Migrates the given object only if the target map is already available,
// or returns false if such a map is not yet available.
- static bool TryMigrateInstance(Handle<JSObject> instance);
+ static bool TryMigrateInstance(Isolate* isolate, Handle<JSObject> instance);
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
@@ -476,8 +463,8 @@ class JSObject : public JSReceiver {
int old_index, int new_index);
// Retrieve interceptors.
- inline InterceptorInfo GetNamedInterceptor();
- inline InterceptorInfo GetIndexedInterceptor();
+ DECL_GETTER(GetNamedInterceptor, InterceptorInfo)
+ DECL_GETTER(GetIndexedInterceptor, InterceptorInfo)
// Used from JSReceiver.
V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes>
@@ -546,8 +533,8 @@ class JSObject : public JSReceiver {
// Lookup interceptors are used for handling properties controlled by host
// objects.
- inline bool HasNamedInterceptor();
- inline bool HasIndexedInterceptor();
+ DECL_GETTER(HasNamedInterceptor, bool)
+ DECL_GETTER(HasIndexedInterceptor, bool)
// Support functions for v8 api (needed for correct interceptor behavior).
V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealNamedProperty(
@@ -563,13 +550,12 @@ class JSObject : public JSReceiver {
// JSFunction objects.
static int GetHeaderSize(InstanceType instance_type,
bool function_has_prototype_slot = false);
- static inline int GetHeaderSize(const Map map);
- inline int GetHeaderSize() const;
+ static inline int GetHeaderSize(Map map);
- static inline int GetEmbedderFieldsStartOffset(const Map map);
+ static inline int GetEmbedderFieldsStartOffset(Map map);
inline int GetEmbedderFieldsStartOffset();
- static inline int GetEmbedderFieldCount(const Map map);
+ static inline int GetEmbedderFieldCount(Map map);
inline int GetEmbedderFieldCount() const;
inline int GetEmbedderFieldOffset(int index);
inline Object GetEmbedderField(int index);
@@ -596,7 +582,7 @@ class JSObject : public JSReceiver {
// |expected_additional_properties| is only used for fast-to-slow transitions
// and ignored otherwise.
V8_EXPORT_PRIVATE static void MigrateToMap(
- Handle<JSObject> object, Handle<Map> new_map,
+ Isolate* isolate, Handle<JSObject> object, Handle<Map> new_map,
int expected_additional_properties = 0);
// Forces a prototype without any of the checks that the regular SetPrototype
@@ -609,7 +595,7 @@ class JSObject : public JSReceiver {
// added this number can be indicated to have the backing store allocated to
// an initial capacity for holding these properties.
V8_EXPORT_PRIVATE static void NormalizeProperties(
- Handle<JSObject> object, PropertyNormalizationMode mode,
+ Isolate* isolate, Handle<JSObject> object, PropertyNormalizationMode mode,
int expected_additional_properties, const char* reason);
// Convert and update the elements backing store to be a
@@ -624,15 +610,17 @@ class JSObject : public JSReceiver {
int unused_property_fields,
const char* reason);
- inline bool IsUnboxedDoubleField(FieldIndex index);
+ inline bool IsUnboxedDoubleField(FieldIndex index) const;
+ inline bool IsUnboxedDoubleField(Isolate* isolate, FieldIndex index) const;
// Access fast-case object properties at index.
static Handle<Object> FastPropertyAt(Handle<JSObject> object,
Representation representation,
FieldIndex index);
- inline Object RawFastPropertyAt(FieldIndex index);
- inline double RawFastDoublePropertyAt(FieldIndex index);
- inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index);
+ inline Object RawFastPropertyAt(FieldIndex index) const;
+ inline Object RawFastPropertyAt(Isolate* isolate, FieldIndex index) const;
+ inline double RawFastDoublePropertyAt(FieldIndex index) const;
+ inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
inline void FastPropertyAtPut(FieldIndex index, Object value);
inline void RawFastPropertyAtPut(
@@ -679,8 +667,6 @@ class JSObject : public JSReceiver {
static bool IsExtensible(Handle<JSObject> object);
- DECL_CAST(JSObject)
-
// Dispatched behavior.
void JSObjectShortPrint(StringStream* accumulator);
DECL_PRINTER(JSObject)
@@ -727,7 +713,7 @@ class JSObject : public JSReceiver {
// If a GC was caused while constructing this object, the elements pointer
// may point to a one pointer filler map. The object won't be rooted, but
// our heap verification code could stumble across it.
- V8_EXPORT_PRIVATE bool ElementsAreSafeToExamine() const;
+ V8_EXPORT_PRIVATE bool ElementsAreSafeToExamine(Isolate* isolate) const;
#endif
Object SlowReverseLookup(Object value);
@@ -764,15 +750,6 @@ class JSObject : public JSReceiver {
STATIC_ASSERT(kMaxNumberOfDescriptors + kFieldsAdded <=
PropertyArray::kMaxLength);
-// Layout description.
-#define JS_OBJECT_FIELDS(V) \
- V(kElementsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSReceiver::kHeaderSize, JS_OBJECT_FIELDS)
-#undef JS_OBJECT_FIELDS
-
STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
static const int kMaxInObjectProperties =
(kMaxInstanceSize - kHeaderSize) >> kTaggedSizeLog2;
@@ -825,7 +802,7 @@ class JSObject : public JSReceiver {
V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensionsWithTransition(
Handle<JSObject> object, ShouldThrow should_throw);
- OBJECT_CONSTRUCTORS(JSObject, JSReceiver);
+ TQ_OBJECT_CONSTRUCTORS(JSObject)
};
// JSAccessorPropertyDescriptor is just a JSObject with a specific initial
@@ -835,9 +812,17 @@ class JSObject : public JSReceiver {
class JSAccessorPropertyDescriptor : public JSObject {
public:
// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize,
- TORQUE_GENERATED_JSACCESSOR_PROPERTY_DESCRIPTOR_FIELDS)
+#define JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS(V) \
+ V(kGetOffset, kTaggedSize) \
+ V(kSetOffset, kTaggedSize) \
+ V(kEnumerableOffset, kTaggedSize) \
+ V(kConfigurableOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS)
+#undef JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS
// Indices of in-object properties.
static const int kGetIndex = 0;
@@ -855,8 +840,18 @@ class JSAccessorPropertyDescriptor : public JSObject {
// FromPropertyDescriptor function for regular data properties.
class JSDataPropertyDescriptor : public JSObject {
public:
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize, TORQUE_GENERATED_JSDATA_PROPERTY_DESCRIPTOR_FIELDS)
+ // Layout description.
+#define JS_DATA_PROPERTY_DESCRIPTOR_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ V(kWritableOffset, kTaggedSize) \
+ V(kEnumerableOffset, kTaggedSize) \
+ V(kConfigurableOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_DATA_PROPERTY_DESCRIPTOR_FIELDS)
+#undef JS_DATA_PROPERTY_DESCRIPTOR_FIELDS
// Indices of in-object properties.
static const int kValueIndex = 0;
@@ -870,7 +865,7 @@ class JSDataPropertyDescriptor : public JSObject {
// JSIteratorResult is just a JSObject with a specific initial map.
// This initial map adds in-object properties for "done" and "value",
-// as specified by ES6 section 25.1.1.3 The IteratorResult Interface
+// as specified by ES6 section 25.1.1.3 The IteratorResult Interface.
class JSIteratorResult : public JSObject {
public:
DECL_ACCESSORS(value, Object)
@@ -878,8 +873,15 @@ class JSIteratorResult : public JSObject {
DECL_ACCESSORS(done, Object)
// Layout description.
+#define JS_ITERATOR_RESULT_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ V(kDoneOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSITERATOR_RESULT_FIELDS)
+ JS_ITERATOR_RESULT_FIELDS)
+#undef JS_ITERATOR_RESULT_FIELDS
// Indices of in-object properties.
static const int kValueIndex = 0;
@@ -894,7 +896,6 @@ class JSIteratorResult : public JSObject {
class JSBoundFunction : public JSObject {
public:
// [bound_target_function]: The wrapped function object.
- inline Object raw_bound_target_function() const;
DECL_ACCESSORS(bound_target_function, JSReceiver)
// [bound_this]: The value that is always passed as the this value when
@@ -933,7 +934,7 @@ class JSBoundFunction : public JSObject {
class JSFunction : public JSObject {
public:
// [prototype_or_initial_map]:
- DECL_ACCESSORS(prototype_or_initial_map, Object)
+ DECL_ACCESSORS(prototype_or_initial_map, HeapObject)
// [shared]: The information about the function that
// can be shared by instances.
@@ -947,7 +948,7 @@ class JSFunction : public JSObject {
// [context]: The context for this function.
inline Context context();
inline bool has_context() const;
- inline void set_context(Object context);
+ inline void set_context(HeapObject context);
inline JSGlobalProxy global_proxy();
inline NativeContext native_context();
inline int length();
@@ -1055,13 +1056,14 @@ class JSFunction : public JSObject {
inline bool NeedsResetDueToFlushedBytecode();
inline void ResetIfBytecodeFlushed();
- inline bool has_prototype_slot() const;
+ DECL_GETTER(has_prototype_slot, bool)
// The initial map for an object created by this constructor.
- inline Map initial_map();
+ DECL_GETTER(initial_map, Map)
+
static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
Handle<HeapObject> prototype);
- inline bool has_initial_map();
+ DECL_GETTER(has_initial_map, bool)
V8_EXPORT_PRIVATE static void EnsureHasInitialMap(
Handle<JSFunction> function);
@@ -1076,12 +1078,12 @@ class JSFunction : public JSObject {
// function has an initial map the prototype is set on the initial
// map. Otherwise, the prototype is put in the initial map field
// until an initial map is needed.
- inline bool has_prototype();
- inline bool has_instance_prototype();
- inline Object prototype();
- inline HeapObject instance_prototype();
- inline bool has_prototype_property();
- inline bool PrototypeRequiresRuntimeLookup();
+ DECL_GETTER(has_prototype, bool)
+ DECL_GETTER(has_instance_prototype, bool)
+ DECL_GETTER(prototype, Object)
+ DECL_GETTER(instance_prototype, HeapObject)
+ DECL_GETTER(has_prototype_property, bool)
+ DECL_GETTER(PrototypeRequiresRuntimeLookup, bool)
static void SetPrototype(Handle<JSFunction> function, Handle<Object> value);
// Returns if this function has been compiled to native code yet.
@@ -1149,14 +1151,9 @@ class JSFunction : public JSObject {
//
// Accessing a JSGlobalProxy requires security check.
-class JSGlobalProxy : public JSObject {
+class JSGlobalProxy
+ : public TorqueGeneratedJSGlobalProxy<JSGlobalProxy, JSObject> {
public:
- // [native_context]: the owner native context of this global proxy object.
- // It is null value if this object is not used by any context.
- DECL_ACCESSORS(native_context, Object)
-
- DECL_CAST(JSGlobalProxy)
-
inline bool IsDetachedFrom(JSGlobalObject global) const;
static int SizeWithEmbedderFields(int embedder_field_count);
@@ -1165,11 +1162,7 @@ class JSGlobalProxy : public JSObject {
DECL_PRINTER(JSGlobalProxy)
DECL_VERIFIER(JSGlobalProxy)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSGLOBAL_PROXY_FIELDS)
-
- OBJECT_CONSTRUCTORS(JSGlobalProxy, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSGlobalProxy)
};
// JavaScript global object.
@@ -1182,7 +1175,7 @@ class JSGlobalObject : public JSObject {
DECL_ACCESSORS(global_proxy, JSGlobalProxy)
// Gets global object properties.
- inline GlobalDictionary global_dictionary();
+ DECL_GETTER(global_dictionary, GlobalDictionary)
inline void set_global_dictionary(GlobalDictionary dictionary);
static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
@@ -1208,22 +1201,13 @@ class JSGlobalObject : public JSObject {
};
// Representation for JS Wrapper objects, String, Number, Boolean, etc.
-class JSValue : public JSObject {
+class JSPrimitiveWrapper
+ : public TorqueGeneratedJSPrimitiveWrapper<JSPrimitiveWrapper, JSObject> {
public:
- // [value]: the object being wrapped.
- DECL_ACCESSORS(value, Object)
-
- DECL_CAST(JSValue)
-
// Dispatched behavior.
- DECL_PRINTER(JSValue)
- DECL_VERIFIER(JSValue)
+ DECL_PRINTER(JSPrimitiveWrapper)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSVALUE_FIELDS)
-
- OBJECT_CONSTRUCTORS(JSValue, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSPrimitiveWrapper)
};
class DateCache;
@@ -1367,8 +1351,7 @@ class JSMessageObject : public JSObject {
// EnsureSourcePositionsAvailable must have been called before calling this.
Handle<String> GetSourceLine() const;
- inline int error_level() const;
- inline void set_error_level(int level);
+ DECL_INT_ACCESSORS(error_level)
DECL_CAST(JSMessageObject)
@@ -1384,8 +1367,6 @@ class JSMessageObject : public JSObject {
using BodyDescriptor = FixedBodyDescriptor<HeapObject::kMapOffset,
kPointerFieldsEndOffset, kSize>;
- OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
-
private:
friend class Factory;
@@ -1400,12 +1381,14 @@ class JSMessageObject : public JSObject {
DECL_ACCESSORS(bytecode_offset, Smi)
// [start_position]: the start position in the script for the error message.
- inline int start_position() const;
- inline void set_start_position(int value);
+ DECL_INT_ACCESSORS(start_position)
// [end_position]: the end position in the script for the error message.
- inline int end_position() const;
- inline void set_end_position(int value);
+ DECL_INT_ACCESSORS(end_position)
+
+ DECL_INT_ACCESSORS(raw_type)
+
+ OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
};
// The [Async-from-Sync Iterator] object
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index 1924bdc4ff..b8fe7f50f0 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -25,11 +25,12 @@ ACCESSORS(JSPluralRules, locale, String, kLocaleOffset)
SMI_ACCESSORS(JSPluralRules, flags, kFlagsOffset)
ACCESSORS(JSPluralRules, icu_plural_rules, Managed<icu::PluralRules>,
kIcuPluralRulesOffset)
-ACCESSORS(JSPluralRules, icu_decimal_format, Managed<icu::DecimalFormat>,
- kIcuDecimalFormatOffset)
+ACCESSORS(JSPluralRules, icu_number_formatter,
+ Managed<icu::number::LocalizedNumberFormatter>,
+ kIcuNumberFormatterOffset)
inline void JSPluralRules::set_type(Type type) {
- DCHECK_LT(type, Type::COUNT);
+ DCHECK_LE(type, TypeBits::kMax);
int hints = flags();
hints = TypeBits::update(hints, type);
set_flags(hints);
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index 8daf5db64a..84fe9b6d52 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -10,11 +10,12 @@
#include "src/execution/isolate-inl.h"
#include "src/objects/intl-objects.h"
+#include "src/objects/js-number-format.h"
#include "src/objects/js-plural-rules-inl.h"
-#include "unicode/decimfmt.h"
#include "unicode/locid.h"
-#include "unicode/numfmt.h"
+#include "unicode/numberformatter.h"
#include "unicode/plurrule.h"
+#include "unicode/unumberformatter.h"
namespace v8 {
namespace internal {
@@ -23,8 +24,7 @@ namespace {
bool CreateICUPluralRules(Isolate* isolate, const icu::Locale& icu_locale,
JSPluralRules::Type type,
- std::unique_ptr<icu::PluralRules>* pl,
- std::unique_ptr<icu::DecimalFormat>* nf) {
+ std::unique_ptr<icu::PluralRules>* pl) {
// Make formatter from options. Numbering system is added
// to the locale as Unicode extension (if it was specified at all).
UErrorCode status = U_ZERO_ERROR;
@@ -43,41 +43,10 @@ bool CreateICUPluralRules(Isolate* isolate, const icu::Locale& icu_locale,
}
CHECK_NOT_NULL(plural_rules.get());
- std::unique_ptr<icu::DecimalFormat> number_format(
- static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, UNUM_DECIMAL, status)));
- if (U_FAILURE(status)) {
- return false;
- }
- CHECK_NOT_NULL(number_format.get());
-
*pl = std::move(plural_rules);
- *nf = std::move(number_format);
-
return true;
}
-void InitializeICUPluralRules(
- Isolate* isolate, const icu::Locale& icu_locale, JSPluralRules::Type type,
- std::unique_ptr<icu::PluralRules>* plural_rules,
- std::unique_ptr<icu::DecimalFormat>* number_format) {
- bool success = CreateICUPluralRules(isolate, icu_locale, type, plural_rules,
- number_format);
- if (!success) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- success = CreateICUPluralRules(isolate, no_extension_locale, type,
- plural_rules, number_format);
-
- if (!success) {
- FATAL("Failed to create ICU PluralRules, are ICU data files missing?");
- }
- }
-
- CHECK_NOT_NULL((*plural_rules).get());
- CHECK_NOT_NULL((*number_format).get());
-}
-
} // namespace
Handle<String> JSPluralRules::TypeAsString() const {
@@ -86,16 +55,14 @@ Handle<String> JSPluralRules::TypeAsString() const {
return GetReadOnlyRoots().cardinal_string_handle();
case Type::ORDINAL:
return GetReadOnlyRoots().ordinal_string_handle();
- case Type::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
// static
-MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
- Isolate* isolate, Handle<JSPluralRules> plural_rules,
- Handle<Object> locales, Handle<Object> options_obj) {
- plural_rules->set_flags(0);
+MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> options_obj) {
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -135,9 +102,6 @@ MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
MAYBE_RETURN(maybe_type, MaybeHandle<JSPluralRules>());
Type type = maybe_type.FromJust();
- // 8. Set pluralRules.[[Type]] to t.
- plural_rules->set_type(type);
-
// Note: The spec says we should do ResolveLocale after performing
// SetNumberFormatDigitOptions but we need the locale to create all
// the ICU data structures.
@@ -150,48 +114,64 @@ MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
Intl::ResolvedLocale r =
Intl::ResolveLocale(isolate, JSPluralRules::GetAvailableLocales(),
requested_locales, matcher, {});
-
- // 12. Set pluralRules.[[Locale]] to the value of r.[[locale]].
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- plural_rules->set_locale(*locale_str);
+
+ icu::number::LocalizedNumberFormatter icu_number_formatter =
+ icu::number::NumberFormatter::withLocale(r.icu_locale)
+ .roundingMode(UNUM_ROUND_HALFUP);
std::unique_ptr<icu::PluralRules> icu_plural_rules;
- std::unique_ptr<icu::DecimalFormat> icu_decimal_format;
- InitializeICUPluralRules(isolate, r.icu_locale, type, &icu_plural_rules,
- &icu_decimal_format);
+ bool success =
+ CreateICUPluralRules(isolate, r.icu_locale, type, &icu_plural_rules);
+ if (!success) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(r.icu_locale.getBaseName());
+ success = CreateICUPluralRules(isolate, no_extension_locale, type,
+ &icu_plural_rules);
+ icu_number_formatter =
+ icu::number::NumberFormatter::withLocale(no_extension_locale)
+ .roundingMode(UNUM_ROUND_HALFUP);
+
+ if (!success) {
+ FATAL("Failed to create ICU PluralRules, are ICU data files missing?");
+ }
+ }
+
CHECK_NOT_NULL(icu_plural_rules.get());
- CHECK_NOT_NULL(icu_decimal_format.get());
// 9. Perform ? SetNumberFormatDigitOptions(pluralRules, options, 0, 3).
Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
- Intl::SetNumberFormatDigitOptions(isolate, options, 0, 3);
+ Intl::SetNumberFormatDigitOptions(isolate, options, 0, 3, false);
MAYBE_RETURN(maybe_digit_options, MaybeHandle<JSPluralRules>());
Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
-
- icu_decimal_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
- icu_decimal_format->setMinimumIntegerDigits(
- digit_options.minimum_integer_digits);
- icu_decimal_format->setMinimumFractionDigits(
- digit_options.minimum_fraction_digits);
- icu_decimal_format->setMaximumFractionDigits(
- digit_options.maximum_fraction_digits);
- if (digit_options.minimum_significant_digits > 0) {
- icu_decimal_format->setMinimumSignificantDigits(
- digit_options.minimum_significant_digits);
- icu_decimal_format->setMaximumSignificantDigits(
- digit_options.maximum_significant_digits);
- }
+ icu_number_formatter = JSNumberFormat::SetDigitOptionsToFormatter(
+ icu_number_formatter, digit_options);
Handle<Managed<icu::PluralRules>> managed_plural_rules =
Managed<icu::PluralRules>::FromUniquePtr(isolate, 0,
std::move(icu_plural_rules));
- plural_rules->set_icu_plural_rules(*managed_plural_rules);
- Handle<Managed<icu::DecimalFormat>> managed_decimal_format =
- Managed<icu::DecimalFormat>::FromUniquePtr(isolate, 0,
- std::move(icu_decimal_format));
- plural_rules->set_icu_decimal_format(*managed_decimal_format);
+ Handle<Managed<icu::number::LocalizedNumberFormatter>>
+ managed_number_formatter =
+ Managed<icu::number::LocalizedNumberFormatter>::FromRawPtr(
+ isolate, 0,
+ new icu::number::LocalizedNumberFormatter(icu_number_formatter));
+
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSPluralRules> plural_rules = Handle<JSPluralRules>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ plural_rules->set_flags(0);
+
+ // 8. Set pluralRules.[[Type]] to t.
+ plural_rules->set_type(type);
+
+ // 12. Set pluralRules.[[Locale]] to the value of r.[[locale]].
+ plural_rules->set_locale(*locale_str);
+
+ plural_rules->set_icu_plural_rules(*managed_plural_rules);
+ plural_rules->set_icu_number_formatter(*managed_number_formatter);
// 13. Return pluralRules.
return plural_rules;
@@ -202,31 +182,20 @@ MaybeHandle<String> JSPluralRules::ResolvePlural(
icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules().raw();
CHECK_NOT_NULL(icu_plural_rules);
- icu::DecimalFormat* icu_decimal_format =
- plural_rules->icu_decimal_format().raw();
- CHECK_NOT_NULL(icu_decimal_format);
+ icu::number::LocalizedNumberFormatter* fmt =
+ plural_rules->icu_number_formatter().raw();
+ CHECK_NOT_NULL(fmt);
- // Currently, PluralRules doesn't implement all the options for rounding that
- // the Intl spec provides; format and parse the number to round to the
- // appropriate amount, then apply PluralRules.
- //
- // TODO(littledan): If a future ICU version supports an extended API to avoid
- // this step, then switch to that API. Bug thread:
- // http://bugs.icu-project.org/trac/ticket/12763
- icu::UnicodeString rounded_string;
- icu_decimal_format->format(number, rounded_string);
-
- icu::Formattable formattable;
UErrorCode status = U_ZERO_ERROR;
- icu_decimal_format->parse(rounded_string, formattable, status);
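+ // The plural category is selected from the formatted (i.e. rounded) value,
+ // so the digit options applied to the formatter are honored.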
+ icu::number::FormattedNumber formatted_number =
+ fmt->formatDouble(number, status);
CHECK(U_SUCCESS(status));
- double rounded = formattable.getDouble(status);
+ icu::UnicodeString result =
+ icu_plural_rules->select(formatted_number, status);
CHECK(U_SUCCESS(status));
- icu::UnicodeString result = icu_plural_rules->select(rounded);
- return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
+ return Intl::ToString(isolate, result);
}
namespace {
@@ -261,36 +230,27 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
CreateDataPropertyForOptions(isolate, options, plural_rules->TypeAsString(),
"type");
- icu::DecimalFormat* icu_decimal_format =
- plural_rules->icu_decimal_format().raw();
- CHECK_NOT_NULL(icu_decimal_format);
-
- // This is a safe upcast as icu::DecimalFormat inherits from
- // icu::NumberFormat.
- icu::NumberFormat* icu_number_format =
- static_cast<icu::NumberFormat*>(icu_decimal_format);
+ UErrorCode status = U_ZERO_ERROR;
+ icu::number::LocalizedNumberFormatter* icu_number_formatter =
+ plural_rules->icu_number_formatter().raw();
+ icu::UnicodeString skeleton = icu_number_formatter->toSkeleton(status);
+ CHECK(U_SUCCESS(status));
- int min_int_digits = icu_number_format->getMinimumIntegerDigits();
- CreateDataPropertyForOptions(isolate, options, min_int_digits,
- "minimumIntegerDigits");
+ CreateDataPropertyForOptions(
+ isolate, options,
+ JSNumberFormat::MinimumIntegerDigitsFromSkeleton(skeleton),
+ "minimumIntegerDigits");
+ int32_t min = 0, max = 0;
+ JSNumberFormat::FractionDigitsFromSkeleton(skeleton, &min, &max);
- int min_fraction_digits = icu_number_format->getMinimumFractionDigits();
- CreateDataPropertyForOptions(isolate, options, min_fraction_digits,
- "minimumFractionDigits");
+ CreateDataPropertyForOptions(isolate, options, min, "minimumFractionDigits");
- int max_fraction_digits = icu_number_format->getMaximumFractionDigits();
- CreateDataPropertyForOptions(isolate, options, max_fraction_digits,
- "maximumFractionDigits");
+ CreateDataPropertyForOptions(isolate, options, max, "maximumFractionDigits");
- if (icu_decimal_format->areSignificantDigitsUsed()) {
- int min_significant_digits =
- icu_decimal_format->getMinimumSignificantDigits();
- CreateDataPropertyForOptions(isolate, options, min_significant_digits,
+ if (JSNumberFormat::SignificantDigitsFromSkeleton(skeleton, &min, &max)) {
+ CreateDataPropertyForOptions(isolate, options, min,
"minimumSignificantDigits");
-
- int max_significant_digits =
- icu_decimal_format->getMaximumSignificantDigits();
- CreateDataPropertyForOptions(isolate, options, max_significant_digits,
+ CreateDataPropertyForOptions(isolate, options, max,
"maximumSignificantDigits");
}
@@ -299,7 +259,6 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules().raw();
CHECK_NOT_NULL(icu_plural_rules);
- UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::StringEnumeration> categories(
icu_plural_rules->getKeywords(status));
CHECK(U_SUCCESS(status));
@@ -329,13 +288,39 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
return options;
}
+namespace {
+
+class PluralRulesAvailableLocales {
+ public:
+ PluralRulesAvailableLocales() {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> locales(
+ icu::PluralRules::getAvailableLocales(status));
+ CHECK(U_SUCCESS(status));
+ int32_t len = 0;
+ const char* locale = nullptr;
+ while ((locale = locales->next(&len, status)) != nullptr &&
+ U_SUCCESS(status)) {
+ std::string str(locale);
+ if (len > 3) {
+ std::replace(str.begin(), str.end(), '_', '-');
+ }
+ set_.insert(std::move(str));
+ }
+ }
+ const std::set<std::string>& Get() const { return set_; }
+
+ private:
+ std::set<std::string> set_;
+};
+
+} // namespace
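For reference, the helper above walks ICU's PluralRules::getAvailableLocales() enumeration and normalizes identifiers as it inserts them: an entry such as "pt_PT" (ICU's underscore form) would be stored in the BCP 47-style form "pt-PT", while entries of three characters or fewer (for example "en") are stored unchanged.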
+
const std::set<std::string>& JSPluralRules::GetAvailableLocales() {
- // TODO(ftang): For PluralRules, filter out locales that
- // don't support PluralRules.
- // PluralRules is missing an appropriate getAvailableLocales method,
- // so we should filter from all locales, but it's not clear how; see
- // https://ssl.icu-project.org/trac/ticket/12756
- return Intl::GetAvailableLocalesForLocale();
+ static base::LazyInstance<PluralRulesAvailableLocales>::type
+ available_locales = LAZY_INSTANCE_INITIALIZER;
+ return available_locales.Pointer()->Get();
}
} // namespace internal
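For context, a minimal standalone sketch of the new format-then-select path (assuming ICU 64+ APIs; not taken from this patch): the value is formatted once with a LocalizedNumberFormatter and the resulting FormattedNumber is handed straight to PluralRules::select, so rounding and plural selection stay consistent without the old format-and-reparse round trip.

#include <memory>

#include <unicode/locid.h>
#include <unicode/numberformatter.h>
#include <unicode/plurrule.h>
#include <unicode/unistr.h>

// Minimal sketch, assuming ICU >= 64; "en" and the input value are arbitrary.
icu::UnicodeString SelectPluralCategory(double value, UErrorCode& status) {
  icu::number::LocalizedNumberFormatter formatter =
      icu::number::NumberFormatter::withLocale(icu::Locale("en"));
  std::unique_ptr<icu::PluralRules> rules(
      icu::PluralRules::forLocale(icu::Locale("en"), status));
  if (U_FAILURE(status)) return icu::UnicodeString();
  // Format once, then let PluralRules see the formatted (i.e. rounded) value.
  icu::number::FormattedNumber formatted =
      formatter.formatDouble(value, status);
  return rules->select(formatted, status);  // e.g. "other" for 1.5 in English
}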
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index 249090bdf6..840efb07ed 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -22,8 +22,10 @@
#include "src/objects/object-macros.h"
namespace U_ICU_NAMESPACE {
-class DecimalFormat;
class PluralRules;
+namespace number {
+class LocalizedNumberFormatter;
+} // namespace number
} // namespace U_ICU_NAMESPACE
namespace v8 {
@@ -31,9 +33,9 @@ namespace internal {
class JSPluralRules : public JSObject {
public:
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSPluralRules> Initialize(
- Isolate* isolate, Handle<JSPluralRules> plural_rules,
- Handle<Object> locales, Handle<Object> options);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSPluralRules> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options);
static Handle<JSObject> ResolvedOptions(Isolate* isolate,
Handle<JSPluralRules> plural_rules);
@@ -45,12 +47,7 @@ class JSPluralRules : public JSObject {
// [[Type]] is one of the values "cardinal" or "ordinal",
// identifying the plural rules used.
- enum class Type {
- CARDINAL,
- ORDINAL,
-
- COUNT
- };
+ enum class Type { CARDINAL, ORDINAL };
inline void set_type(Type type);
inline Type type() const;
@@ -76,7 +73,8 @@ class JSPluralRules : public JSObject {
DECL_ACCESSORS(locale, String)
DECL_INT_ACCESSORS(flags)
DECL_ACCESSORS(icu_plural_rules, Managed<icu::PluralRules>)
- DECL_ACCESSORS(icu_decimal_format, Managed<icu::DecimalFormat>)
+ DECL_ACCESSORS(icu_number_formatter,
+ Managed<icu::number::LocalizedNumberFormatter>)
OBJECT_CONSTRUCTORS(JSPluralRules, JSObject);
};
diff --git a/deps/v8/src/objects/js-proxy-inl.h b/deps/v8/src/objects/js-proxy-inl.h
index f33628b5c2..0683cfeec8 100644
--- a/deps/v8/src/objects/js-proxy-inl.h
+++ b/deps/v8/src/objects/js-proxy-inl.h
@@ -15,12 +15,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSProxy, JSReceiver)
-
-CAST_ACCESSOR(JSProxy)
-
-ACCESSORS(JSProxy, target, Object, kTargetOffset)
-ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSProxy)
bool JSProxy::IsRevoked() const { return !handler().IsJSReceiver(); }
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index c4f98927e9..8e29c08bc1 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -15,21 +15,14 @@ namespace v8 {
namespace internal {
// The JSProxy describes EcmaScript Harmony proxies
-class JSProxy : public JSReceiver {
+class JSProxy : public TorqueGeneratedJSProxy<JSProxy, JSReceiver> {
public:
V8_WARN_UNUSED_RESULT static MaybeHandle<JSProxy> New(Isolate* isolate,
Handle<Object>,
Handle<Object>);
- // [handler]: The handler property.
- DECL_ACCESSORS(handler, Object)
- // [target]: The target property.
- DECL_ACCESSORS(target, Object)
-
static MaybeHandle<NativeContext> GetFunctionRealm(Handle<JSProxy> proxy);
- DECL_CAST(JSProxy)
-
V8_INLINE bool IsRevoked() const;
static void Revoke(Handle<JSProxy> proxy);
@@ -70,6 +63,10 @@ class JSProxy : public JSReceiver {
V8_WARN_UNUSED_RESULT static Maybe<bool> CheckHasTrap(
Isolate* isolate, Handle<Name> name, Handle<JSReceiver> target);
+ // ES6 9.5.10
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CheckDeleteTrap(
+ Isolate* isolate, Handle<Name> name, Handle<JSReceiver> target);
+
// ES6 9.5.8
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetProperty(
Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
@@ -106,10 +103,6 @@ class JSProxy : public JSReceiver {
static const int kMaxIterationLimit = 100 * 1024;
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSReceiver::kHeaderSize,
- TORQUE_GENERATED_JSPROXY_FIELDS)
-
// kTargetOffset aliases with the elements of JSObject. The fact that
// JSProxy::target is a Javascript value which cannot be confused with an
// elements backing store is exploited by loading from this offset from an
@@ -125,7 +118,7 @@ class JSProxy : public JSReceiver {
PropertyDescriptor* desc,
Maybe<ShouldThrow> should_throw);
- OBJECT_CONSTRUCTORS(JSProxy, JSReceiver);
+ TQ_OBJECT_CONSTRUCTORS(JSProxy)
};
// JSProxyRevocableResult is just a JSObject with a specific initial map.
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index e525c66e3e..18355079f8 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -37,13 +37,13 @@ class JSRegExp : public JSObject {
// IRREGEXP: Compiled with Irregexp.
enum Type { NOT_COMPILED, ATOM, IRREGEXP };
struct FlagShiftBit {
- static const int kGlobal = 0;
- static const int kIgnoreCase = 1;
- static const int kMultiline = 2;
- static const int kSticky = 3;
- static const int kUnicode = 4;
- static const int kDotAll = 5;
- static const int kInvalid = 7;
+ static constexpr int kGlobal = 0;
+ static constexpr int kIgnoreCase = 1;
+ static constexpr int kMultiline = 2;
+ static constexpr int kSticky = 3;
+ static constexpr int kUnicode = 4;
+ static constexpr int kDotAll = 5;
+ static constexpr int kInvalid = 6;
};
enum Flag : uint8_t {
kNone = 0,
@@ -57,28 +57,31 @@ class JSRegExp : public JSObject {
kInvalid = 1 << FlagShiftBit::kInvalid, // Not included in FlagCount.
};
using Flags = base::Flags<Flag>;
- static constexpr int FlagCount() { return 6; }
-
- static int FlagShiftBits(Flag flag) {
- switch (flag) {
- case kGlobal:
- return FlagShiftBit::kGlobal;
- case kIgnoreCase:
- return FlagShiftBit::kIgnoreCase;
- case kMultiline:
- return FlagShiftBit::kMultiline;
- case kSticky:
- return FlagShiftBit::kSticky;
- case kUnicode:
- return FlagShiftBit::kUnicode;
- case kDotAll:
- return FlagShiftBit::kDotAll;
- default:
- STATIC_ASSERT(FlagCount() == 6);
- UNREACHABLE();
- }
+
+ static constexpr int kFlagCount = 6;
+
+ static constexpr Flag FlagFromChar(char c) {
+ STATIC_ASSERT(kFlagCount == 6);
+ // clang-format off
+ return c == 'g' ? kGlobal
+ : c == 'i' ? kIgnoreCase
+ : c == 'm' ? kMultiline
+ : c == 'y' ? kSticky
+ : c == 'u' ? kUnicode
+ : c == 's' ? kDotAll
+ : kInvalid;
+ // clang-format on
}
+ STATIC_ASSERT(static_cast<int>(kNone) == v8::RegExp::kNone);
+ STATIC_ASSERT(static_cast<int>(kGlobal) == v8::RegExp::kGlobal);
+ STATIC_ASSERT(static_cast<int>(kIgnoreCase) == v8::RegExp::kIgnoreCase);
+ STATIC_ASSERT(static_cast<int>(kMultiline) == v8::RegExp::kMultiline);
+ STATIC_ASSERT(static_cast<int>(kSticky) == v8::RegExp::kSticky);
+ STATIC_ASSERT(static_cast<int>(kUnicode) == v8::RegExp::kUnicode);
+ STATIC_ASSERT(static_cast<int>(kDotAll) == v8::RegExp::kDotAll);
+ STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
+
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(flags, Object)
DECL_ACCESSORS(last_index, Object)
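As an aside, making the flag lookup constexpr lets per-character mappings fold at compile time; a hypothetical standalone sketch of the same mapping, using plain ints rather than V8 types:

// Hypothetical sketch mirroring FlagFromChar above; the bit positions match
// FlagShiftBit, and any unrecognised character maps to the invalid bit.
constexpr int FlagBitFromChar(char c) {
  return c == 'g' ? 1 << 0 : c == 'i' ? 1 << 1 : c == 'm' ? 1 << 2
       : c == 'y' ? 1 << 3 : c == 'u' ? 1 << 4 : c == 's' ? 1 << 5
       : 1 << 6;  // invalid
}
static_assert(FlagBitFromChar('u') == (1 << 4), "'u' selects the unicode bit");
static_assert(FlagBitFromChar('x') == (1 << 6), "unknown characters are invalid");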
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 1ff66b1a12..fac3439b31 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -27,7 +27,7 @@ ACCESSORS(JSRelativeTimeFormat, icu_formatter,
SMI_ACCESSORS(JSRelativeTimeFormat, flags, kFlagsOffset)
inline void JSRelativeTimeFormat::set_style(Style style) {
- DCHECK_GT(Style::COUNT, style);
+ DCHECK_GE(StyleBits::kMax, style);
int hints = flags();
hints = StyleBits::update(hints, style);
set_flags(hints);
@@ -38,7 +38,7 @@ inline JSRelativeTimeFormat::Style JSRelativeTimeFormat::style() const {
}
inline void JSRelativeTimeFormat::set_numeric(Numeric numeric) {
- DCHECK_GT(Numeric::COUNT, numeric);
+ DCHECK_GE(NumericBits::kMax, numeric);
int hints = flags();
hints = NumericBits::update(hints, numeric);
set_flags(hints);
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 59a3bf7ea0..28f8c757ee 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -34,9 +34,8 @@ UDateRelativeDateTimeFormatterStyle getIcuStyle(
return UDAT_STYLE_SHORT;
case JSRelativeTimeFormat::Style::NARROW:
return UDAT_STYLE_NARROW;
- case JSRelativeTimeFormat::Style::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
} // namespace
@@ -54,11 +53,9 @@ JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::getNumeric(
UNREACHABLE();
}
-MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
- Isolate* isolate, Handle<JSRelativeTimeFormat> relative_time_format_holder,
- Handle<Object> locales, Handle<Object> input_options) {
- relative_time_format_holder->set_flags(0);
-
+MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> input_options) {
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -125,7 +122,6 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
Handle<String> locale_str = isolate->factory()->NewStringFromAsciiChecked(
maybe_locale_str.FromJust().c_str());
- relative_time_format_holder->set_locale(*locale_str);
// 15. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
@@ -136,9 +132,6 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
MAYBE_RETURN(maybe_style, MaybeHandle<JSRelativeTimeFormat>());
Style style_enum = maybe_style.FromJust();
- // 16. Set relativeTimeFormat.[[Style]] to s.
- relative_time_format_holder->set_style(style_enum);
-
// 17. Let numeric be ? GetOption(options, "numeric", "string",
// «"always", "auto"», "always").
Maybe<Numeric> maybe_numeric = Intl::GetStringOption<Numeric>(
@@ -147,9 +140,6 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
MAYBE_RETURN(maybe_numeric, MaybeHandle<JSRelativeTimeFormat>());
Numeric numeric_enum = maybe_numeric.FromJust();
- // 18. Set relativeTimeFormat.[[Numeric]] to numeric.
- relative_time_format_holder->set_numeric(numeric_enum);
-
// 19. Let relativeTimeFormat.[[NumberFormat]] be
// ? Construct(%NumberFormat%, « nfLocale, nfOptions »).
icu::NumberFormat* number_format =
@@ -179,6 +169,21 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
Managed<icu::RelativeDateTimeFormatter>::FromRawPtr(isolate, 0,
icu_formatter);
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSRelativeTimeFormat> relative_time_format_holder =
+ Handle<JSRelativeTimeFormat>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ relative_time_format_holder->set_flags(0);
+
+ relative_time_format_holder->set_locale(*locale_str);
+
+ // 16. Set relativeTimeFormat.[[Style]] to s.
+ relative_time_format_holder->set_style(style_enum);
+
+ // 18. Set relativeTimeFormat.[[Numeric]] to numeric.
+ relative_time_format_holder->set_numeric(numeric_enum);
+
// 21. Set relativeTimeFormat.[[InitializedRelativeTimeFormat]] to true.
relative_time_format_holder->set_icu_formatter(*managed_formatter);
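The ordering in the added code above is deliberate: everything that can allocate or fail (locale resolution, ICU object construction, the Managed<> wrapper) runs before the JSRelativeTimeFormat is created, and the fields are then written under DisallowHeapAllocation, so the GC never sees a partially initialized object. The same allocate-last shape is applied to JSSegmentIterator and JSSegmenter further below.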
@@ -214,9 +219,8 @@ Handle<String> JSRelativeTimeFormat::StyleAsString() const {
return GetReadOnlyRoots().short_string_handle();
case Style::NARROW:
return GetReadOnlyRoots().narrow_string_handle();
- case Style::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
Handle<String> JSRelativeTimeFormat::NumericAsString() const {
@@ -225,9 +229,8 @@ Handle<String> JSRelativeTimeFormat::NumericAsString() const {
return GetReadOnlyRoots().always_string_handle();
case Numeric::AUTO:
return GetReadOnlyRoots().auto_string_handle();
- case Numeric::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
namespace {
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 740336c29c..6e405e345e 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -30,12 +30,11 @@ namespace internal {
class JSRelativeTimeFormat : public JSObject {
public:
- // Initializes relative time format object with properties derived from input
+ // Creates relative time format object with properties derived from input
// locales and options.
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSRelativeTimeFormat> Initialize(
- Isolate* isolate,
- Handle<JSRelativeTimeFormat> relative_time_format_holder,
- Handle<Object> locales, Handle<Object> options);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSRelativeTimeFormat> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options);
V8_WARN_UNUSED_RESULT static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSRelativeTimeFormat> format_holder);
@@ -67,10 +66,9 @@ class JSRelativeTimeFormat : public JSObject {
// ecma402/#sec-properties-of-intl-relativetimeformat-instances
enum class Style {
- LONG, // Everything spelled out.
- SHORT, // Abbreviations used when possible.
- NARROW, // Use the shortest possible form.
- COUNT
+ LONG, // Everything spelled out.
+ SHORT, // Abbreviations used when possible.
+ NARROW // Use the shortest possible form.
};
inline void set_style(Style style);
inline Style style() const;
@@ -82,9 +80,8 @@ class JSRelativeTimeFormat : public JSObject {
// ecma402/#sec-properties-of-intl-relativetimeformat-instances
enum class Numeric {
ALWAYS, // numerical descriptions are always used ("1 day ago")
- AUTO, // numerical descriptions are used only when no more specific
+ AUTO // numerical descriptions are used only when no more specific
// version is available ("yesterday")
- COUNT
};
inline void set_numeric(Numeric numeric);
inline Numeric numeric() const;
diff --git a/deps/v8/src/objects/js-segment-iterator-inl.h b/deps/v8/src/objects/js-segment-iterator-inl.h
index 24a827c030..b2d745179a 100644
--- a/deps/v8/src/objects/js-segment-iterator-inl.h
+++ b/deps/v8/src/objects/js-segment-iterator-inl.h
@@ -35,7 +35,7 @@ CAST_ACCESSOR(JSSegmentIterator)
inline void JSSegmentIterator::set_granularity(
JSSegmenter::Granularity granularity) {
- DCHECK_GT(JSSegmenter::Granularity::COUNT, granularity);
+ DCHECK_GE(GranularityBits::kMax, granularity);
int hints = flags();
hints = GranularityBits::update(hints, granularity);
set_flags(hints);
diff --git a/deps/v8/src/objects/js-segment-iterator.cc b/deps/v8/src/objects/js-segment-iterator.cc
index 3d2b19ca5c..509db37d44 100644
--- a/deps/v8/src/objects/js-segment-iterator.cc
+++ b/deps/v8/src/objects/js-segment-iterator.cc
@@ -37,9 +37,8 @@ Handle<String> JSSegmentIterator::GranularityAsString() const {
return GetReadOnlyRoots().word_string_handle();
case JSSegmenter::Granularity::SENTENCE:
return GetReadOnlyRoots().sentence_string_handle();
- case JSSegmenter::Granularity::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
MaybeHandle<JSSegmentIterator> JSSegmentIterator::Create(
@@ -49,22 +48,25 @@ MaybeHandle<JSSegmentIterator> JSSegmentIterator::Create(
// 1. Let iterator be ObjectCreate(%SegmentIteratorPrototype%).
Handle<Map> map = Handle<Map>(
isolate->native_context()->intl_segment_iterator_map(), isolate);
- Handle<JSObject> result = isolate->factory()->NewJSObjectFromMap(map);
+ Handle<Managed<icu::BreakIterator>> managed_break_iterator =
+ Managed<icu::BreakIterator>::FromRawPtr(isolate, 0, break_iterator);
+ Handle<Managed<icu::UnicodeString>> unicode_string =
+ Intl::SetTextToBreakIterator(isolate, text, break_iterator);
+
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSObject> result = isolate->factory()->NewJSObjectFromMap(map);
+ DisallowHeapAllocation no_gc;
Handle<JSSegmentIterator> segment_iterator =
Handle<JSSegmentIterator>::cast(result);
segment_iterator->set_flags(0);
segment_iterator->set_granularity(granularity);
// 2. Let iterator.[[SegmentIteratorSegmenter]] be segmenter.
- Handle<Managed<icu::BreakIterator>> managed_break_iterator =
- Managed<icu::BreakIterator>::FromRawPtr(isolate, 0, break_iterator);
segment_iterator->set_icu_break_iterator(*managed_break_iterator);
// 3. Let iterator.[[SegmentIteratorString]] be string.
- Managed<icu::UnicodeString> unicode_string =
- Intl::SetTextToBreakIterator(isolate, text, break_iterator);
- segment_iterator->set_unicode_string(unicode_string);
+ segment_iterator->set_unicode_string(*unicode_string);
// 4. Let iterator.[[SegmentIteratorIndex]] be 0.
// step 4 is stored inside break_iterator.
@@ -119,9 +121,8 @@ Handle<Object> JSSegmentIterator::BreakType() const {
return GetReadOnlyRoots().sep_string_handle();
}
return GetReadOnlyRoots().undefined_value_handle();
- case JSSegmenter::Granularity::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
// ecma402 #sec-segment-iterator-prototype-index
diff --git a/deps/v8/src/objects/js-segmenter-inl.h b/deps/v8/src/objects/js-segmenter-inl.h
index b4adf4c8e6..a31de29c25 100644
--- a/deps/v8/src/objects/js-segmenter-inl.h
+++ b/deps/v8/src/objects/js-segmenter-inl.h
@@ -27,7 +27,7 @@ ACCESSORS(JSSegmenter, icu_break_iterator, Managed<icu::BreakIterator>,
SMI_ACCESSORS(JSSegmenter, flags, kFlagsOffset)
inline void JSSegmenter::set_granularity(Granularity granularity) {
- DCHECK_GT(Granularity::COUNT, granularity);
+ DCHECK_GE(GranularityBits::kMax, granularity);
int hints = flags();
hints = GranularityBits::update(hints, granularity);
set_flags(hints);
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
index 5321334678..7985cf1c99 100644
--- a/deps/v8/src/objects/js-segmenter.cc
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -30,11 +30,9 @@ JSSegmenter::Granularity JSSegmenter::GetGranularity(const char* str) {
UNREACHABLE();
}
-MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
- Isolate* isolate, Handle<JSSegmenter> segmenter_holder,
- Handle<Object> locales, Handle<Object> input_options) {
- segmenter_holder->set_flags(0);
-
+MaybeHandle<JSSegmenter> JSSegmenter::New(Isolate* isolate, Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> input_options) {
// 3. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -69,11 +67,8 @@ MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
Intl::ResolvedLocale r =
Intl::ResolveLocale(isolate, JSSegmenter::GetAvailableLocales(),
requested_locales, matcher, {});
-
- // 10. Set segmenter.[[Locale]] to the value of r.[[Locale]].
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- segmenter_holder->set_locale(*locale_str);
// 13. Let granularity be ? GetOption(options, "granularity", "string", «
// "grapheme", "word", "sentence" », "grapheme").
@@ -85,9 +80,6 @@ MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
MAYBE_RETURN(maybe_granularity, MaybeHandle<JSSegmenter>());
Granularity granularity_enum = maybe_granularity.FromJust();
- // 14. Set segmenter.[[SegmenterGranularity]] to granularity.
- segmenter_holder->set_granularity(granularity_enum);
-
icu::Locale icu_locale = r.icu_locale;
DCHECK(!icu_locale.isBogus());
@@ -107,8 +99,6 @@ MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
icu_break_iterator.reset(
icu::BreakIterator::createSentenceInstance(icu_locale, status));
break;
- case Granularity::COUNT:
- UNREACHABLE();
}
CHECK(U_SUCCESS(status));
@@ -118,6 +108,18 @@ MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
Managed<icu::BreakIterator>::FromUniquePtr(isolate, 0,
std::move(icu_break_iterator));
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSSegmenter> segmenter_holder = Handle<JSSegmenter>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ segmenter_holder->set_flags(0);
+
+ // 10. Set segmenter.[[Locale]] to the value of r.[[Locale]].
+ segmenter_holder->set_locale(*locale_str);
+
+ // 14. Set segmenter.[[SegmenterGranularity]] to granularity.
+ segmenter_holder->set_granularity(granularity_enum);
+
segmenter_holder->set_icu_break_iterator(*managed_break_iterator);
return segmenter_holder;
}
@@ -157,9 +159,8 @@ Handle<String> JSSegmenter::GranularityAsString() const {
return GetReadOnlyRoots().word_string_handle();
case Granularity::SENTENCE:
return GetReadOnlyRoots().sentence_string_handle();
- case Granularity::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
const std::set<std::string>& JSSegmenter::GetAvailableLocales() {
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index 423dd67497..641cf106fb 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -30,11 +30,11 @@ namespace internal {
class JSSegmenter : public JSObject {
public:
- // Initializes segmenter object with properties derived from input
- // locales and options.
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSSegmenter> Initialize(
- Isolate* isolate, Handle<JSSegmenter> segmenter_holder,
- Handle<Object> locales, Handle<Object> options);
+ // Creates segmenter object with properties derived from input locales and
+ // options.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSSegmenter> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options);
V8_WARN_UNUSED_RESULT static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSSegmenter> segmenter_holder);
@@ -56,8 +56,7 @@ class JSSegmenter : public JSObject {
enum class Granularity {
GRAPHEME, // for character-breaks
WORD, // for word-breaks
- SENTENCE, // for sentence-breaks
- COUNT
+ SENTENCE // for sentence-breaks
};
inline void set_granularity(Granularity granularity);
inline Granularity granularity() const;
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 6632a31002..46f28e883e 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -97,16 +97,16 @@ void JSFinalizationGroup::Register(
}
}
-void JSFinalizationGroup::Unregister(
- Handle<JSFinalizationGroup> finalization_group, Handle<Object> key,
- Isolate* isolate) {
+bool JSFinalizationGroup::Unregister(
+ Handle<JSFinalizationGroup> finalization_group,
+ Handle<JSReceiver> unregister_token, Isolate* isolate) {
// Iterate through the doubly linked list of WeakCells associated with the
// key. Each WeakCell will be in the "active_cells" or "cleared_cells" list of
// its FinalizationGroup; remove it from there.
if (!finalization_group->key_map().IsUndefined(isolate)) {
Handle<ObjectHashTable> key_map =
handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
- Object value = key_map->Lookup(key);
+ Object value = key_map->Lookup(unregister_token);
Object undefined = ReadOnlyRoots(isolate).undefined_value();
while (value.IsWeakCell()) {
WeakCell weak_cell = WeakCell::cast(value);
@@ -116,9 +116,13 @@ void JSFinalizationGroup::Unregister(
weak_cell.set_key_list_next(undefined);
}
bool was_present;
- key_map = ObjectHashTable::Remove(isolate, key_map, key, &was_present);
+ key_map = ObjectHashTable::Remove(isolate, key_map, unregister_token,
+ &was_present);
finalization_group->set_key_map(*key_map);
+ return was_present;
}
+
+ return false;
}
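The new boolean result presumably lets the FinalizationGroup.prototype.unregister builtin report whether any cell was actually removed for the given token; a hypothetical caller sketch (not from this patch):

  // Hypothetical caller: surface the result of Unregister as a JS boolean.
  bool was_removed = JSFinalizationGroup::Unregister(finalization_group,
                                                     unregister_token, isolate);
  return *isolate->factory()->ToBoolean(was_removed);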
bool JSFinalizationGroup::NeedsCleanup() const {
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index b846c2e608..6a401fecee 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -41,8 +41,9 @@ class JSFinalizationGroup : public JSObject {
Handle<JSReceiver> target,
Handle<Object> holdings, Handle<Object> key,
Isolate* isolate);
- inline static void Unregister(Handle<JSFinalizationGroup> finalization_group,
- Handle<Object> key, Isolate* isolate);
+ inline static bool Unregister(Handle<JSFinalizationGroup> finalization_group,
+ Handle<JSReceiver> unregister_token,
+ Isolate* isolate);
// Returns true if the cleared_cells list is non-empty.
inline bool NeedsCleanup() const;
@@ -57,24 +58,13 @@ class JSFinalizationGroup : public JSObject {
// Constructs an iterator for the WeakCells in the cleared_cells list and
// calls the user's cleanup function.
- static void Cleanup(Handle<JSFinalizationGroup> finalization_group,
- Isolate* isolate);
-
-// Layout description.
-#define JS_FINALIZATION_GROUP_FIELDS(V) \
- V(kNativeContextOffset, kTaggedSize) \
- V(kCleanupOffset, kTaggedSize) \
- V(kActiveCellsOffset, kTaggedSize) \
- V(kClearedCellsOffset, kTaggedSize) \
- V(kKeyMapOffset, kTaggedSize) \
- V(kNextOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
+ static void Cleanup(Isolate* isolate,
+ Handle<JSFinalizationGroup> finalization_group,
+ Handle<Object> callback);
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_FINALIZATION_GROUP_FIELDS)
-#undef JS_FINALIZATION_GROUP_FIELDS
+ TORQUE_GENERATED_JSFINALIZATION_GROUP_FIELDS)
// Bitfields in flags.
class ScheduledForCleanupField : public BitField<bool, 0, 1> {};
@@ -106,21 +96,9 @@ class WeakCell : public HeapObject {
DECL_ACCESSORS(key_list_prev, Object)
DECL_ACCESSORS(key_list_next, Object)
-// Layout description.
-#define WEAK_CELL_FIELDS(V) \
- V(kFinalizationGroupOffset, kTaggedSize) \
- V(kTargetOffset, kTaggedSize) \
- V(kHoldingsOffset, kTaggedSize) \
- V(kPrevOffset, kTaggedSize) \
- V(kNextOffset, kTaggedSize) \
- V(kKeyOffset, kTaggedSize) \
- V(kKeyListPrevOffset, kTaggedSize) \
- V(kKeyListNextOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WEAK_CELL_FIELDS)
-#undef WEAK_CELL_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WEAK_CELL_FIELDS)
class BodyDescriptor;
@@ -146,14 +124,9 @@ class JSWeakRef : public JSObject {
DECL_ACCESSORS(target, HeapObject)
-// Layout description.
-#define JS_WEAK_REF_FIELDS(V) \
- V(kTargetOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_WEAK_REF_FIELDS)
-#undef JS_WEAK_REF_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSWEAK_REF_FIELDS)
class BodyDescriptor;
@@ -189,15 +162,10 @@ class JSFinalizationGroupCleanupIterator : public JSObject {
DECL_ACCESSORS(finalization_group, JSFinalizationGroup)
-// Layout description.
-#define JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS(V) \
- V(kFinalizationGroupOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS)
-#undef JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSFINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS)
OBJECT_CONSTRUCTORS(JSFinalizationGroupCleanupIterator, JSObject);
};
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index d3a1f6bdc2..18b38ed744 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -395,6 +395,11 @@ MaybeHandle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
MaybeHandle<FixedArray> FastKeyAccumulator::GetKeys(
GetKeysConversion keys_conversion) {
+ // TODO(v8:9401): We should extend the fast path of KeyAccumulator::GetKeys
+ // to also take the fast path when filter == SKIP_SYMBOLS. We used to pass
+ // the wrong filter in order to take the fast path in cases where we tried
+ // to verify that all properties are enumerable. However, those checks were
+ // not correct, and passing the wrong filter led to wrong behaviour.
if (filter_ == ENUMERABLE_STRINGS) {
Handle<FixedArray> keys;
if (GetKeysFast(keys_conversion).ToHandle(&keys)) {
diff --git a/deps/v8/src/objects/layout-descriptor-inl.h b/deps/v8/src/objects/layout-descriptor-inl.h
index 49683da267..ad0a058a92 100644
--- a/deps/v8/src/objects/layout-descriptor-inl.h
+++ b/deps/v8/src/objects/layout-descriptor-inl.h
@@ -209,11 +209,11 @@ int LayoutDescriptor::number_of_layout_words() {
}
uint32_t LayoutDescriptor::get_layout_word(int index) const {
- return get_uint32(index);
+ return get_uint32_relaxed(index);
}
void LayoutDescriptor::set_layout_word(int index, uint32_t value) {
- set_uint32(index, value);
+ set_uint32_relaxed(index, value);
}
// LayoutDescriptorHelper is a helper class for querying whether inobject
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index 1ddb333cff..32b43cd8f7 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -15,6 +15,10 @@
namespace v8 {
namespace internal {
+//
+// ObjectBoilerplateDescription
+//
+
OBJECT_CONSTRUCTORS_IMPL(ObjectBoilerplateDescription, FixedArray)
CAST_ACCESSOR(ObjectBoilerplateDescription)
@@ -22,6 +26,70 @@ CAST_ACCESSOR(ObjectBoilerplateDescription)
SMI_ACCESSORS(ObjectBoilerplateDescription, flags,
FixedArray::OffsetOfElementAt(kLiteralTypeOffset))
+Object ObjectBoilerplateDescription::name(int index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return name(isolate, index);
+}
+
+Object ObjectBoilerplateDescription::name(Isolate* isolate, int index) const {
+ // get() already checks for out of bounds access, but we do not want to allow
+ // access to the last element, if it is the number of properties.
+ DCHECK_NE(size(), index);
+ return get(isolate, 2 * index + kDescriptionStartIndex);
+}
+
+Object ObjectBoilerplateDescription::value(int index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return value(isolate, index);
+}
+
+Object ObjectBoilerplateDescription::value(Isolate* isolate, int index) const {
+ return get(isolate, 2 * index + 1 + kDescriptionStartIndex);
+}
+
+void ObjectBoilerplateDescription::set_key_value(int index, Object key,
+ Object value) {
+ DCHECK_LT(index, size());
+ DCHECK_GE(index, 0);
+ set(2 * index + kDescriptionStartIndex, key);
+ set(2 * index + 1 + kDescriptionStartIndex, value);
+}
+
+int ObjectBoilerplateDescription::size() const {
+ DCHECK_EQ(0, (length() - kDescriptionStartIndex -
+ (this->has_number_of_properties() ? 1 : 0)) %
+ 2);
+ // Rounding is intended.
+ return (length() - kDescriptionStartIndex) / 2;
+}
+
+bool ObjectBoilerplateDescription::has_number_of_properties() const {
+ return (length() - kDescriptionStartIndex) % 2 != 0;
+}
+
+int ObjectBoilerplateDescription::backing_store_size() const {
+ if (has_number_of_properties()) {
+ // If present, the last entry contains the number of properties.
+ return Smi::ToInt(this->get(length() - 1));
+ }
+ // If the number is not given explicitly, we assume there are no
+ // properties with computed names.
+ return size();
+}
+
+void ObjectBoilerplateDescription::set_backing_store_size(
+ int backing_store_size) {
+ DCHECK(has_number_of_properties());
+ DCHECK_NE(size(), backing_store_size);
+ CHECK(Smi::IsValid(backing_store_size));
+ // TODO(ishell): move this value to the header
+ set(length() - 1, Smi::FromInt(backing_store_size));
+}
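A worked reading of the accessors above (assuming kDescriptionStartIndex is 1, the slot right after the flags element): a literal with two constant-named properties and one computed-name property stores the flags, two key/value pairs, and a trailing Smi, so length() is 1 + 2*2 + 1 = 6, size() is (6 - 1) / 2 = 2 thanks to the intentional integer rounding, has_number_of_properties() is true because the remainder is odd, and backing_store_size() reads the trailing Smi (3, counting the computed property as well).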
+
+//
+// ClassBoilerplate
+//
+
OBJECT_CONSTRUCTORS_IMPL(ClassBoilerplate, FixedArray)
CAST_ACCESSOR(ClassBoilerplate)
@@ -52,6 +120,10 @@ ACCESSORS(ClassBoilerplate, instance_elements_template, Object,
ACCESSORS(ClassBoilerplate, instance_computed_properties, FixedArray,
FixedArray::OffsetOfElementAt(kPrototypeComputedPropertiesIndex))
+//
+// ArrayBoilerplateDescription
+//
+
OBJECT_CONSTRUCTORS_IMPL(ArrayBoilerplateDescription, Struct)
CAST_ACCESSOR(ArrayBoilerplateDescription)
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index bfdbd9317b..7328c11f31 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -17,56 +17,6 @@
namespace v8 {
namespace internal {
-Object ObjectBoilerplateDescription::name(int index) const {
- // get() already checks for out of bounds access, but we do not want to allow
- // access to the last element, if it is the number of properties.
- DCHECK_NE(size(), index);
- return get(2 * index + kDescriptionStartIndex);
-}
-
-Object ObjectBoilerplateDescription::value(int index) const {
- return get(2 * index + 1 + kDescriptionStartIndex);
-}
-
-void ObjectBoilerplateDescription::set_key_value(int index, Object key,
- Object value) {
- DCHECK_LT(index, size());
- DCHECK_GE(index, 0);
- set(2 * index + kDescriptionStartIndex, key);
- set(2 * index + 1 + kDescriptionStartIndex, value);
-}
-
-int ObjectBoilerplateDescription::size() const {
- DCHECK_EQ(0, (length() - kDescriptionStartIndex -
- (this->has_number_of_properties() ? 1 : 0)) %
- 2);
- // Rounding is intended.
- return (length() - kDescriptionStartIndex) / 2;
-}
-
-int ObjectBoilerplateDescription::backing_store_size() const {
- if (has_number_of_properties()) {
- // If present, the last entry contains the number of properties.
- return Smi::ToInt(this->get(length() - 1));
- }
- // If the number is not given explicitly, we assume there are no
- // properties with computed names.
- return size();
-}
-
-void ObjectBoilerplateDescription::set_backing_store_size(
- Isolate* isolate, int backing_store_size) {
- DCHECK(has_number_of_properties());
- DCHECK_NE(size(), backing_store_size);
- Handle<Object> backing_store_size_obj =
- isolate->factory()->NewNumberFromInt(backing_store_size);
- set(length() - 1, *backing_store_size_obj);
-}
-
-bool ObjectBoilerplateDescription::has_number_of_properties() const {
- return (length() - kDescriptionStartIndex) % 2 != 0;
-}
-
namespace {
inline int EncodeComputedEntry(ClassBoilerplate::ValueKind value_kind,
@@ -306,8 +256,12 @@ class ObjectDescriptor {
void IncPropertiesCount() { ++property_count_; }
void IncElementsCount() { ++element_count_; }
+ explicit ObjectDescriptor(int property_slack)
+ : property_slack_(property_slack) {}
+
bool HasDictionaryProperties() const {
- return computed_count_ > 0 || property_count_ > kMaxNumberOfDescriptors;
+ return computed_count_ > 0 ||
+ (property_count_ + property_slack_) > kMaxNumberOfDescriptors;
}
Handle<Object> properties_template() const {
@@ -324,17 +278,17 @@ class ObjectDescriptor {
return computed_properties_;
}
- void CreateTemplates(Isolate* isolate, int slack) {
+ void CreateTemplates(Isolate* isolate) {
Factory* factory = isolate->factory();
descriptor_array_template_ = factory->empty_descriptor_array();
properties_dictionary_template_ = factory->empty_property_dictionary();
- if (property_count_ || HasDictionaryProperties() || slack) {
+ if (property_count_ || computed_count_ || property_slack_) {
if (HasDictionaryProperties()) {
properties_dictionary_template_ = NameDictionary::New(
- isolate, property_count_ + computed_count_ + slack);
+ isolate, property_count_ + computed_count_ + property_slack_);
} else {
- descriptor_array_template_ =
- DescriptorArray::Allocate(isolate, 0, property_count_ + slack);
+ descriptor_array_template_ = DescriptorArray::Allocate(
+ isolate, 0, property_count_ + property_slack_);
}
}
elements_dictionary_template_ =
@@ -419,6 +373,7 @@ class ObjectDescriptor {
}
private:
+ const int property_slack_;
int property_count_ = 0;
int next_enumeration_index_ = PropertyDetails::kInitialIndex;
int element_count_ = 0;
@@ -454,8 +409,8 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
// in CanonicalHandleScope.
HandleScope scope(isolate);
Factory* factory = isolate->factory();
- ObjectDescriptor static_desc;
- ObjectDescriptor instance_desc;
+ ObjectDescriptor static_desc(kMinimumClassPropertiesCount);
+ ObjectDescriptor instance_desc(kMinimumPrototypePropertiesCount);
for (int i = 0; i < expr->properties()->length(); i++) {
ClassLiteral::Property* property = expr->properties()->at(i);
@@ -475,7 +430,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
//
// Initialize class object template.
//
- static_desc.CreateTemplates(isolate, kMinimumClassPropertiesCount);
+ static_desc.CreateTemplates(isolate);
STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
{
// Add length_accessor.
@@ -509,7 +464,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
//
// Initialize prototype object template.
//
- instance_desc.CreateTemplates(isolate, kMinimumPrototypePropertiesCount);
+ instance_desc.CreateTemplates(isolate);
{
Handle<Object> value(
Smi::FromInt(ClassBoilerplate::kConstructorArgumentIndex), isolate);
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 35ae98a05b..f009a54f8a 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -21,20 +21,23 @@ class ClassLiteral;
// of properties in the backing store. This number includes properties with
// computed names that are not
// in the list.
+// TODO(ishell): Don't derive from FixedArray as it already has its own map.
class ObjectBoilerplateDescription : public FixedArray {
public:
- Object name(int index) const;
- Object value(int index) const;
+ inline Object name(int index) const;
+ inline Object name(Isolate* isolate, int index) const;
- void set_key_value(int index, Object key, Object value);
+ inline Object value(int index) const;
+ inline Object value(Isolate* isolate, int index) const;
+
+ inline void set_key_value(int index, Object key, Object value);
// The number of boilerplate properties.
- int size() const;
+ inline int size() const;
// Number of boilerplate properties and properties with computed names.
- int backing_store_size() const;
-
- void set_backing_store_size(Isolate* isolate, int backing_store_size);
+ inline int backing_store_size() const;
+ inline void set_backing_store_size(int backing_store_size);
// Used to encode ObjectLiteral::Flags for nested object literals
// Stored as the first element of the fixed array
@@ -47,7 +50,7 @@ class ObjectBoilerplateDescription : public FixedArray {
DECL_PRINTER(ObjectBoilerplateDescription)
private:
- bool has_number_of_properties() const;
+ inline bool has_number_of_properties() const;
OBJECT_CONSTRUCTORS(ObjectBoilerplateDescription, FixedArray);
};
diff --git a/deps/v8/src/objects/lookup-inl.h b/deps/v8/src/objects/lookup-inl.h
index 5b2dbff258..648398be5e 100644
--- a/deps/v8/src/objects/lookup-inl.h
+++ b/deps/v8/src/objects/lookup-inl.h
@@ -31,7 +31,7 @@ LookupIterator::LookupIterator(Handle<Object> receiver, Handle<Name> name,
LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
Handle<Name> name, Handle<JSReceiver> holder,
Configuration configuration)
- : configuration_(ComputeConfiguration(configuration, name)),
+ : configuration_(ComputeConfiguration(isolate, configuration, name)),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(isolate),
@@ -90,7 +90,7 @@ Handle<Name> LookupIterator::GetName() {
}
bool LookupIterator::is_dictionary_holder() const {
- return !holder_->HasFastProperties();
+ return !holder_->HasFastProperties(isolate_);
}
Handle<Map> LookupIterator::transition_map() const {
@@ -111,23 +111,23 @@ Handle<T> LookupIterator::GetHolder() const {
bool LookupIterator::ExtendingNonExtensible(Handle<JSReceiver> receiver) {
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
- return !receiver->map().is_extensible() &&
- (IsElement() || !name_->IsPrivate());
+ return !receiver->map(isolate_).is_extensible() &&
+ (IsElement() || !name_->IsPrivate(isolate_));
}
bool LookupIterator::IsCacheableTransition() {
DCHECK_EQ(TRANSITION, state_);
- return transition_->IsPropertyCell() ||
+ return transition_->IsPropertyCell(isolate_) ||
(transition_map()->is_dictionary_map() &&
- !GetStoreTarget<JSReceiver>()->HasFastProperties()) ||
- transition_map()->GetBackPointer().IsMap();
+ !GetStoreTarget<JSReceiver>()->HasFastProperties(isolate_)) ||
+ transition_map()->GetBackPointer(isolate_).IsMap(isolate_);
}
void LookupIterator::UpdateProtector() {
if (IsElement()) return;
// This list must be kept in sync with
// CodeStubAssembler::CheckForAssociatedProtector!
- ReadOnlyRoots roots(heap());
+ ReadOnlyRoots roots(isolate_);
if (*name_ == roots.is_concat_spreadable_symbol() ||
*name_ == roots.constructor_string() || *name_ == roots.next_string() ||
*name_ == roots.species_symbol() || *name_ == roots.iterator_symbol() ||
@@ -139,52 +139,59 @@ void LookupIterator::UpdateProtector() {
int LookupIterator::descriptor_number() const {
DCHECK(!IsElement());
DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
return number_;
}
int LookupIterator::dictionary_entry() const {
DCHECK(!IsElement());
DCHECK(has_property_);
- DCHECK(!holder_->HasFastProperties());
+ DCHECK(!holder_->HasFastProperties(isolate_));
return number_;
}
+// static
LookupIterator::Configuration LookupIterator::ComputeConfiguration(
- Configuration configuration, Handle<Name> name) {
- return name->IsPrivate() ? OWN_SKIP_INTERCEPTOR : configuration;
+ Isolate* isolate, Configuration configuration, Handle<Name> name) {
+ return name->IsPrivate(isolate) ? OWN_SKIP_INTERCEPTOR : configuration;
}
+// static
Handle<JSReceiver> LookupIterator::GetRoot(Isolate* isolate,
Handle<Object> receiver,
uint32_t index) {
- if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+ if (receiver->IsJSReceiver(isolate))
+ return Handle<JSReceiver>::cast(receiver);
return GetRootForNonJSReceiver(isolate, receiver, index);
}
template <class T>
Handle<T> LookupIterator::GetStoreTarget() const {
- DCHECK(receiver_->IsJSReceiver());
- if (receiver_->IsJSGlobalProxy()) {
- Map map = JSGlobalProxy::cast(*receiver_).map();
- if (map.has_hidden_prototype()) {
- return handle(JSGlobalObject::cast(map.prototype()), isolate_);
+ DCHECK(receiver_->IsJSReceiver(isolate_));
+ if (receiver_->IsJSGlobalProxy(isolate_)) {
+ HeapObject prototype =
+ JSGlobalProxy::cast(*receiver_).map(isolate_).prototype(isolate_);
+ if (prototype.IsJSGlobalObject(isolate_)) {
+ return handle(JSGlobalObject::cast(prototype), isolate_);
}
}
return Handle<T>::cast(receiver_);
}
+// static
template <bool is_element>
-InterceptorInfo LookupIterator::GetInterceptor(JSObject holder) {
- return is_element ? holder.GetIndexedInterceptor()
- : holder.GetNamedInterceptor();
+InterceptorInfo LookupIterator::GetInterceptor(Isolate* isolate,
+ JSObject holder) {
+ return is_element ? holder.GetIndexedInterceptor(isolate)
+ : holder.GetNamedInterceptor(isolate);
}
inline Handle<InterceptorInfo> LookupIterator::GetInterceptor() const {
DCHECK_EQ(INTERCEPTOR, state_);
- InterceptorInfo result =
- IsElement() ? GetInterceptor<true>(JSObject::cast(*holder_))
- : GetInterceptor<false>(JSObject::cast(*holder_));
+ JSObject holder = JSObject::cast(*holder_);
+ InterceptorInfo result = IsElement()
+ ? GetInterceptor<true>(isolate_, holder)
+ : GetInterceptor<false>(isolate_, holder);
return handle(result, isolate_);
}
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 744cf67482..33130aafe5 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -80,50 +80,6 @@ LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
return LookupIterator(isolate, receiver, name, configuration);
}
-// TODO(ishell): Consider removing this way of LookupIterator creation.
-// static
-LookupIterator LookupIterator::ForTransitionHandler(
- Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, MaybeHandle<Map> maybe_transition_map) {
- Handle<Map> transition_map;
- if (!maybe_transition_map.ToHandle(&transition_map) ||
- !transition_map->IsPrototypeValidityCellValid()) {
- // This map is not a valid transition handler, so full lookup is required.
- return LookupIterator(isolate, receiver, name);
- }
-
- PropertyDetails details = PropertyDetails::Empty();
- bool has_property;
- if (transition_map->is_dictionary_map()) {
- details = PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
- has_property = false;
- } else {
- details = transition_map->GetLastDescriptorDetails();
- has_property = true;
- }
-#ifdef DEBUG
- if (name->IsPrivate()) {
- DCHECK_EQ(DONT_ENUM, details.attributes());
- } else {
- DCHECK_EQ(NONE, details.attributes());
- }
-#endif
- LookupIterator it(isolate, receiver, name, transition_map, details,
- has_property);
-
- if (!transition_map->is_dictionary_map()) {
- int descriptor_number = transition_map->LastAdded();
- Handle<Map> new_map =
- Map::PrepareForDataProperty(isolate, transition_map, descriptor_number,
- PropertyConstness::kConst, value);
- // Reload information; this is no-op if nothing changed.
- it.property_details_ =
- new_map->instance_descriptors().GetDetails(descriptor_number);
- it.transition_ = new_map;
- }
- return it;
-}
-
LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
Handle<Name> name, Handle<Map> transition_map,
PropertyDetails details, bool has_property)
@@ -151,7 +107,7 @@ void LookupIterator::Start() {
holder_ = initial_holder_;
JSReceiver holder = *holder_;
- Map map = holder.map();
+ Map map = holder.map(isolate_);
state_ = LookupInHolder<is_element>(map, holder);
if (IsFound()) return;
@@ -169,7 +125,7 @@ void LookupIterator::Next() {
has_property_ = false;
JSReceiver holder = *holder_;
- Map map = holder.map();
+ Map map = holder.map(isolate_);
if (map.IsSpecialReceiverMap()) {
state_ = IsElement() ? LookupInSpecialHolder<true>(map, holder)
@@ -195,7 +151,7 @@ void LookupIterator::NextInternal(Map map, JSReceiver holder) {
return;
}
holder = maybe_holder;
- map = holder.map();
+ map = holder.map(isolate_);
state_ = LookupInHolder<is_element>(map, holder);
} while (!IsFound());
@@ -218,17 +174,17 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
Isolate* isolate, Handle<Object> receiver, uint32_t index) {
// Strings are the only objects with properties (only elements) directly on
// the wrapper. Hence we can skip generating the wrapper for all other cases.
- if (receiver->IsString() &&
+ if (receiver->IsString(isolate) &&
index < static_cast<uint32_t>(String::cast(*receiver).length())) {
// TODO(verwaest): Speed this up. Perhaps use a cached wrapper on the native
// context, ensuring that we don't leak it into JS?
Handle<JSFunction> constructor = isolate->string_function();
Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
- Handle<JSValue>::cast(result)->set_value(*receiver);
+ Handle<JSPrimitiveWrapper>::cast(result)->set_value(*receiver);
return result;
}
- auto root =
- handle(receiver->GetPrototypeChainRootMap(isolate).prototype(), isolate);
+ auto root = handle(
+ receiver->GetPrototypeChainRootMap(isolate).prototype(isolate), isolate);
if (root->IsNull(isolate)) {
isolate->PushStackTraceAndDie(reinterpret_cast<void*>(receiver->ptr()));
}
@@ -236,8 +192,8 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
}
Handle<Map> LookupIterator::GetReceiverMap() const {
- if (receiver_->IsNumber()) return factory()->heap_number_map();
- return handle(Handle<HeapObject>::cast(receiver_)->map(), isolate_);
+ if (receiver_->IsNumber(isolate_)) return factory()->heap_number_map();
+ return handle(Handle<HeapObject>::cast(receiver_)->map(isolate_), isolate_);
}
bool LookupIterator::HasAccess() const {
@@ -250,13 +206,13 @@ template <bool is_element>
void LookupIterator::ReloadPropertyInformation() {
state_ = BEFORE_PROPERTY;
interceptor_state_ = InterceptorState::kUninitialized;
- state_ = LookupInHolder<is_element>(holder_->map(), *holder_);
- DCHECK(IsFound() || !holder_->HasFastProperties());
+ state_ = LookupInHolder<is_element>(holder_->map(isolate_), *holder_);
+ DCHECK(IsFound() || !holder_->HasFastProperties(isolate_));
}
namespace {
-bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver holder) {
+bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, HeapObject object) {
static uint32_t context_slots[] = {
#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype) \
Context::TYPE##_ARRAY_FUN_INDEX,
@@ -265,91 +221,99 @@ bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver holder) {
#undef TYPED_ARRAY_CONTEXT_SLOTS
};
- if (!holder.IsJSFunction()) return false;
+ if (!object.IsJSFunction(isolate)) return false;
return std::any_of(
std::begin(context_slots), std::end(context_slots),
- [=](uint32_t slot) { return isolate->IsInAnyContext(holder, slot); });
+ [=](uint32_t slot) { return isolate->IsInAnyContext(object, slot); });
}
} // namespace
void LookupIterator::InternalUpdateProtector() {
if (isolate_->bootstrapper()->IsActive()) return;
+ if (!receiver_->IsHeapObject()) return;
+ Handle<HeapObject> receiver = Handle<HeapObject>::cast(receiver_);
- ReadOnlyRoots roots(heap());
+ Handle<NativeContext> native_context = isolate_->native_context();
+
+ ReadOnlyRoots roots(isolate_);
if (*name_ == roots.constructor_string()) {
if (!isolate_->IsArraySpeciesLookupChainIntact() &&
!isolate_->IsPromiseSpeciesLookupChainIntact() &&
- !isolate_->IsRegExpSpeciesLookupChainIntact() &&
+ !isolate_->IsRegExpSpeciesLookupChainIntact(native_context) &&
!isolate_->IsTypedArraySpeciesLookupChainIntact()) {
return;
}
// Setting the constructor property could change an instance's @@species
- if (holder_->IsJSArray()) {
+ if (receiver->IsJSArray(isolate_)) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
isolate_->InvalidateArraySpeciesProtector();
return;
- } else if (holder_->IsJSPromise()) {
+ } else if (receiver->IsJSPromise(isolate_)) {
if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
isolate_->InvalidatePromiseSpeciesProtector();
return;
- } else if (holder_->IsJSRegExp()) {
- if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
- isolate_->InvalidateRegExpSpeciesProtector();
+ } else if (receiver->IsJSRegExp(isolate_)) {
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact(native_context)) return;
+ isolate_->InvalidateRegExpSpeciesProtector(native_context);
return;
- } else if (holder_->IsJSTypedArray()) {
+ } else if (receiver->IsJSTypedArray(isolate_)) {
if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
isolate_->InvalidateTypedArraySpeciesProtector();
return;
}
- if (holder_->map().is_prototype_map()) {
+ if (receiver->map(isolate_).is_prototype_map()) {
DisallowHeapAllocation no_gc;
// Setting the constructor of any prototype with the @@species protector
// (of any realm) also needs to invalidate the protector.
- // For typed arrays, we check a prototype of this holder since TypedArrays
- // have different prototypes for each type, and their parent prototype is
- // pointing the same TYPED_ARRAY_PROTOTYPE.
- if (isolate_->IsInAnyContext(*holder_,
+ // For typed arrays, we check a prototype of this receiver since
+ // TypedArrays have different prototypes for each type, and their parent
+ // prototype points to the same TYPED_ARRAY_PROTOTYPE.
+ if (isolate_->IsInAnyContext(*receiver,
Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArrayPrototypeConstructorModified);
isolate_->InvalidateArraySpeciesProtector();
- } else if (isolate_->IsInAnyContext(*holder_,
+ } else if (isolate_->IsInAnyContext(*receiver,
Context::PROMISE_PROTOTYPE_INDEX)) {
if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
isolate_->InvalidatePromiseSpeciesProtector();
- } else if (isolate_->IsInAnyContext(*holder_,
+ } else if (isolate_->IsInAnyContext(*receiver,
Context::REGEXP_PROTOTYPE_INDEX)) {
- if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
- isolate_->InvalidateRegExpSpeciesProtector();
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact(native_context)) return;
+ isolate_->InvalidateRegExpSpeciesProtector(native_context);
} else if (isolate_->IsInAnyContext(
- holder_->map().prototype(),
+ receiver->map(isolate_).prototype(isolate_),
Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
isolate_->InvalidateTypedArraySpeciesProtector();
}
}
} else if (*name_ == roots.next_string()) {
- if (isolate_->IsInAnyContext(
- *holder_, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)) {
+ if (receiver->IsJSArrayIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)) {
// Setting the next property of %ArrayIteratorPrototype% also needs to
// invalidate the array iterator protector.
if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
isolate_->InvalidateArrayIteratorProtector();
- } else if (isolate_->IsInAnyContext(
- *holder_, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) {
+ } else if (receiver->IsJSMapIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) {
if (!isolate_->IsMapIteratorLookupChainIntact()) return;
isolate_->InvalidateMapIteratorProtector();
- } else if (isolate_->IsInAnyContext(
- *holder_, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX)) {
+ } else if (receiver->IsJSSetIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX)) {
if (!isolate_->IsSetIteratorLookupChainIntact()) return;
isolate_->InvalidateSetIteratorProtector();
- } else if (isolate_->IsInAnyContext(
- *receiver_,
+ } else if (receiver->IsJSStringIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver,
Context::INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX)) {
// Setting the next property of %StringIteratorPrototype% invalidates the
// string iterator protector.
@@ -359,26 +323,26 @@ void LookupIterator::InternalUpdateProtector() {
} else if (*name_ == roots.species_symbol()) {
if (!isolate_->IsArraySpeciesLookupChainIntact() &&
!isolate_->IsPromiseSpeciesLookupChainIntact() &&
- !isolate_->IsRegExpSpeciesLookupChainIntact() &&
+ !isolate_->IsRegExpSpeciesLookupChainIntact(native_context) &&
!isolate_->IsTypedArraySpeciesLookupChainIntact()) {
return;
}
// Setting the Symbol.species property of any Array, Promise or TypedArray
// constructor invalidates the @@species protector
- if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX)) {
+ if (isolate_->IsInAnyContext(*receiver, Context::ARRAY_FUNCTION_INDEX)) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArraySpeciesModified);
isolate_->InvalidateArraySpeciesProtector();
- } else if (isolate_->IsInAnyContext(*holder_,
+ } else if (isolate_->IsInAnyContext(*receiver,
Context::PROMISE_FUNCTION_INDEX)) {
if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
isolate_->InvalidatePromiseSpeciesProtector();
- } else if (isolate_->IsInAnyContext(*holder_,
+ } else if (isolate_->IsInAnyContext(*receiver,
Context::REGEXP_FUNCTION_INDEX)) {
- if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
- isolate_->InvalidateRegExpSpeciesProtector();
- } else if (IsTypedArrayFunctionInAnyContext(isolate_, *holder_)) {
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact(native_context)) return;
+ isolate_->InvalidateRegExpSpeciesProtector(native_context);
+ } else if (IsTypedArrayFunctionInAnyContext(isolate_, *receiver)) {
if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
isolate_->InvalidateTypedArraySpeciesProtector();
}
@@ -386,23 +350,33 @@ void LookupIterator::InternalUpdateProtector() {
if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
isolate_->InvalidateIsConcatSpreadableProtector();
} else if (*name_ == roots.iterator_symbol()) {
- if (holder_->IsJSArray()) {
+ if (receiver->IsJSArray(isolate_)) {
if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
isolate_->InvalidateArrayIteratorProtector();
+ } else if (receiver->IsJSSet(isolate_) || receiver->IsJSSetIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX) ||
+ isolate_->IsInAnyContext(*receiver,
+ Context::INITIAL_SET_PROTOTYPE_INDEX)) {
+ if (isolate_->IsSetIteratorLookupChainIntact()) {
+ isolate_->InvalidateSetIteratorProtector();
+ }
+ } else if (receiver->IsJSMapIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) {
+ if (isolate_->IsMapIteratorLookupChainIntact()) {
+ isolate_->InvalidateMapIteratorProtector();
+ }
} else if (isolate_->IsInAnyContext(
- *holder_, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX)) {
+ *receiver, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX)) {
if (isolate_->IsMapIteratorLookupChainIntact()) {
isolate_->InvalidateMapIteratorProtector();
}
if (isolate_->IsSetIteratorLookupChainIntact()) {
isolate_->InvalidateSetIteratorProtector();
}
- } else if (isolate_->IsInAnyContext(*holder_,
- Context::INITIAL_SET_PROTOTYPE_INDEX)) {
- if (!isolate_->IsSetIteratorLookupChainIntact()) return;
- isolate_->InvalidateSetIteratorProtector();
} else if (isolate_->IsInAnyContext(
- *receiver_, Context::INITIAL_STRING_PROTOTYPE_INDEX)) {
+ *receiver, Context::INITIAL_STRING_PROTOTYPE_INDEX)) {
// Setting the Symbol.iterator property of String.prototype invalidates
// the string iterator protector. Symbol.iterator can also be set on a
// String wrapper, but not on a primitive string. We only support
@@ -414,7 +388,7 @@ void LookupIterator::InternalUpdateProtector() {
if (!isolate_->IsPromiseResolveLookupChainIntact()) return;
// Setting the "resolve" property on any %Promise% intrinsic object
// invalidates the Promise.resolve protector.
- if (isolate_->IsInAnyContext(*holder_, Context::PROMISE_FUNCTION_INDEX)) {
+ if (isolate_->IsInAnyContext(*receiver, Context::PROMISE_FUNCTION_INDEX)) {
isolate_->InvalidatePromiseResolveProtector();
}
} else if (*name_ == roots.then_string()) {
@@ -426,10 +400,10 @@ void LookupIterator::InternalUpdateProtector() {
// to guard the fast-path in AsyncGeneratorResolve, where we can skip
// the ResolvePromise step and go directly to FulfillPromise if we
// know that the Object.prototype doesn't contain a "then" method.
- if (holder_->IsJSPromise() ||
- isolate_->IsInAnyContext(*holder_,
+ if (receiver->IsJSPromise(isolate_) ||
+ isolate_->IsInAnyContext(*receiver,
Context::INITIAL_OBJECT_PROTOTYPE_INDEX) ||
- isolate_->IsInAnyContext(*holder_, Context::PROMISE_PROTOTYPE_INDEX)) {
+ isolate_->IsInAnyContext(*receiver, Context::PROMISE_PROTOTYPE_INDEX)) {
isolate_->InvalidatePromiseThenProtector();
}
}
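
Each branch above follows the same protector pattern: a one-shot flag guards a fast path, and the first store that could observably change the guarded lookup (for example redefining Symbol.species on a built-in constructor, or "then" on Object.prototype) invalidates it for good. A minimal standalone sketch of that pattern, with invented names and none of V8's Isolate/PropertyCell machinery:

```cpp
#include <cstdio>

// Hypothetical one-shot protector: starts intact and can only be invalidated.
struct Protector {
  bool intact = true;
  void Invalidate() { intact = false; }
};

Protector array_species_protector;

// A fast path is taken only while the protector is intact; after user code
// shadows the guarded property, every later call uses the generic path.
void CloneArray(bool species_was_modified) {
  if (species_was_modified) array_species_protector.Invalidate();
  if (array_species_protector.intact) {
    std::puts("fast path: copy elements directly");
  } else {
    std::puts("generic path: look up the Symbol.species constructor");
  }
}

int main() {
  CloneArray(false);  // fast
  CloneArray(true);   // modification invalidates the protector
  CloneArray(false);  // stays generic: invalidation is one-way
}
```

Because invalidation is one-way, each branch can bail out early when the corresponding chain is already known to be broken, which is what the IsXLookupChainIntact checks do.
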
@@ -441,15 +415,16 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
// JSProxy does not have fast properties so we do an early return.
- DCHECK_IMPLIES(holder->IsJSProxy(), !holder->HasFastProperties());
- DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
- if (holder->IsJSProxy()) return;
+ DCHECK_IMPLIES(holder->IsJSProxy(isolate_),
+ !holder->HasFastProperties(isolate_));
+ DCHECK_IMPLIES(holder->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
+ if (holder->IsJSProxy(isolate_)) return;
Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
if (IsElement()) {
- ElementsKind kind = holder_obj->GetElementsKind();
- ElementsKind to = value->OptimalElementsKind();
+ ElementsKind kind = holder_obj->GetElementsKind(isolate_);
+ ElementsKind to = value->OptimalElementsKind(isolate_);
if (IsHoleyElementsKind(kind)) to = GetHoleyElementsKind(to);
to = GetMoreGeneralElementsKind(kind, to);
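
The elements branch above widens the holder's elements kind just enough to hold the incoming value: it takes the more general of the current kind and the value's optimal kind, and keeps the result holey if the receiver was already holey. A simplified sketch of that lattice walk (the real enum and helpers live in src/objects/elements-kind.h; the ordering below is illustrative):

```cpp
#include <algorithm>
#include <cassert>

// Simplified elements-kind lattice: SMI < DOUBLE < TAGGED, and the holey
// variant of each kind is more general than its packed variant.
enum Kind { PACKED_SMI, HOLEY_SMI, PACKED_DOUBLE, HOLEY_DOUBLE, PACKED, HOLEY };

bool IsHoley(Kind k) { return k == HOLEY_SMI || k == HOLEY_DOUBLE || k == HOLEY; }

Kind ToHoley(Kind k) {
  switch (k) {
    case PACKED_SMI:    return HOLEY_SMI;
    case PACKED_DOUBLE: return HOLEY_DOUBLE;
    case PACKED:        return HOLEY;
    default:            return k;  // already holey
  }
}

// The more general kind wins; a holey input forces a holey output.
Kind MoreGeneral(Kind current, Kind required) {
  Kind result = std::max(current, required);
  if (IsHoley(current) || IsHoley(required)) result = ToHoley(result);
  return result;
}

int main() {
  // Storing a double into a packed-SMI backing store generalizes the kind;
  // a holey receiver keeps the result holey.
  assert(MoreGeneral(PACKED_SMI, PACKED_DOUBLE) == PACKED_DOUBLE);
  assert(MoreGeneral(HOLEY_SMI, PACKED_DOUBLE) == HOLEY_DOUBLE);
}
```
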
@@ -464,17 +439,18 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
return;
}
- if (holder_obj->IsJSGlobalObject()) {
+ if (holder_obj->IsJSGlobalObject(isolate_)) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder_obj).global_dictionary(), isolate());
- Handle<PropertyCell> cell(dictionary->CellAt(dictionary_entry()),
+ JSGlobalObject::cast(*holder_obj).global_dictionary(isolate_),
+ isolate());
+ Handle<PropertyCell> cell(dictionary->CellAt(isolate_, dictionary_entry()),
isolate());
property_details_ = cell->property_details();
PropertyCell::PrepareForValue(isolate(), dictionary, dictionary_entry(),
value, property_details_);
return;
}
- if (!holder_obj->HasFastProperties()) return;
+ if (!holder_obj->HasFastProperties(isolate_)) return;
PropertyConstness new_constness = PropertyConstness::kConst;
if (constness() == PropertyConstness::kConst) {
@@ -485,20 +461,28 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
new_constness = PropertyConstness::kMutable;
}
- Handle<Map> old_map(holder_obj->map(), isolate_);
- Handle<Map> new_map = Map::PrepareForDataProperty(
- isolate(), old_map, descriptor_number(), new_constness, value);
+ Handle<Map> old_map(holder_obj->map(isolate_), isolate_);
+ DCHECK(!old_map->is_dictionary_map());
- if (old_map.is_identical_to(new_map)) {
- // Update the property details if the representation was None.
- if (constness() != new_constness || representation().IsNone()) {
- property_details_ =
- new_map->instance_descriptors().GetDetails(descriptor_number());
+ Handle<Map> new_map = Map::Update(isolate_, old_map);
+ if (!new_map->is_dictionary_map()) {
+ new_map = Map::PrepareForDataProperty(
+ isolate(), new_map, descriptor_number(), new_constness, value);
+
+ if (old_map.is_identical_to(new_map)) {
+ // Update the property details if the representation was None.
+ if (constness() != new_constness || representation().IsNone()) {
+ property_details_ = new_map->instance_descriptors(isolate_).GetDetails(
+ descriptor_number());
+ }
+ return;
}
- return;
}
+ // We should only get here if the new_map is different from the old map,
+  // otherwise we would have fallen through to the is_identical_to check above.
+ DCHECK_NE(*old_map, *new_map);
- JSObject::MigrateToMap(holder_obj, new_map);
+ JSObject::MigrateToMap(isolate_, holder_obj, new_map);
ReloadPropertyInformation<false>();
}
@@ -510,53 +494,59 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
// Property details can never change for private properties.
- if (holder->IsJSProxy()) {
- DCHECK(name()->IsPrivate());
+ if (holder->IsJSProxy(isolate_)) {
+ DCHECK(name()->IsPrivate(isolate_));
return;
}
Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
if (IsElement()) {
- DCHECK(!holder_obj->HasTypedArrayElements());
- DCHECK(attributes != NONE || !holder_obj->HasFastElements());
- Handle<FixedArrayBase> elements(holder_obj->elements(), isolate());
- holder_obj->GetElementsAccessor()->Reconfigure(holder_obj, elements,
- number_, value, attributes);
+ DCHECK(!holder_obj->HasTypedArrayElements(isolate_));
+ DCHECK(attributes != NONE || !holder_obj->HasFastElements(isolate_));
+ Handle<FixedArrayBase> elements(holder_obj->elements(isolate_), isolate());
+ holder_obj->GetElementsAccessor(isolate_)->Reconfigure(
+ holder_obj, elements, number_, value, attributes);
ReloadPropertyInformation<true>();
- } else if (holder_obj->HasFastProperties()) {
- Handle<Map> old_map(holder_obj->map(), isolate_);
- Handle<Map> new_map = Map::ReconfigureExistingProperty(
- isolate_, old_map, descriptor_number(), i::kData, attributes);
+ } else if (holder_obj->HasFastProperties(isolate_)) {
+ Handle<Map> old_map(holder_obj->map(isolate_), isolate_);
// Force mutable to avoid changing constant value by reconfiguring
// kData -> kAccessor -> kData.
- new_map =
- Map::PrepareForDataProperty(isolate(), new_map, descriptor_number(),
- PropertyConstness::kMutable, value);
- JSObject::MigrateToMap(holder_obj, new_map);
+ Handle<Map> new_map = Map::ReconfigureExistingProperty(
+ isolate_, old_map, descriptor_number(), i::kData, attributes,
+ PropertyConstness::kMutable);
+ if (!new_map->is_dictionary_map()) {
+ // Make sure that the data property has a compatible representation.
+ // TODO(leszeks): Do this as part of ReconfigureExistingProperty.
+ new_map =
+ Map::PrepareForDataProperty(isolate(), new_map, descriptor_number(),
+ PropertyConstness::kMutable, value);
+ }
+ JSObject::MigrateToMap(isolate_, holder_obj, new_map);
ReloadPropertyInformation<false>();
}
- if (!IsElement() && !holder_obj->HasFastProperties()) {
+ if (!IsElement() && !holder_obj->HasFastProperties(isolate_)) {
PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
- if (holder_obj->map().is_prototype_map() &&
+ if (holder_obj->map(isolate_).is_prototype_map() &&
(property_details_.attributes() & READ_ONLY) == 0 &&
(attributes & READ_ONLY) != 0) {
// Invalidate prototype validity cell when a property is reconfigured
// from writable to read-only as this may invalidate transitioning store
// IC handlers.
- JSObject::InvalidatePrototypeChains(holder->map());
+ JSObject::InvalidatePrototypeChains(holder->map(isolate_));
}
- if (holder_obj->IsJSGlobalObject()) {
+ if (holder_obj->IsJSGlobalObject(isolate_)) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder_obj).global_dictionary(), isolate());
+ JSGlobalObject::cast(*holder_obj).global_dictionary(isolate_),
+ isolate());
Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
isolate(), dictionary, dictionary_entry(), value, details);
cell->set_value(*value);
property_details_ = cell->property_details();
} else {
- Handle<NameDictionary> dictionary(holder_obj->property_dictionary(),
- isolate());
+ Handle<NameDictionary> dictionary(
+ holder_obj->property_dictionary(isolate_), isolate());
PropertyDetails original_details =
dictionary->DetailsAt(dictionary_entry());
int enumeration_index = original_details.dictionary_index();
@@ -583,21 +573,21 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
void LookupIterator::PrepareTransitionToDataProperty(
Handle<JSReceiver> receiver, Handle<Object> value,
PropertyAttributes attributes, StoreOrigin store_origin) {
- DCHECK_IMPLIES(receiver->IsJSProxy(), name()->IsPrivate());
+ DCHECK_IMPLIES(receiver->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
if (state_ == TRANSITION) return;
- if (!IsElement() && name()->IsPrivate()) {
+ if (!IsElement() && name()->IsPrivate(isolate_)) {
attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
}
DCHECK(state_ != LookupIterator::ACCESSOR ||
- (GetAccessors()->IsAccessorInfo() &&
+ (GetAccessors()->IsAccessorInfo(isolate_) &&
AccessorInfo::cast(*GetAccessors()).is_special_data_property()));
DCHECK_NE(INTEGER_INDEXED_EXOTIC, state_);
DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
- Handle<Map> map(receiver->map(), isolate_);
+ Handle<Map> map(receiver->map(isolate_), isolate_);
// Dictionary maps can always have additional data properties.
if (map->is_dictionary_map()) {
@@ -608,9 +598,9 @@ void LookupIterator::PrepareTransitionToDataProperty(
int entry;
Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
global, name(), PropertyCellType::kUninitialized, &entry);
- Handle<GlobalDictionary> dictionary(global->global_dictionary(),
+ Handle<GlobalDictionary> dictionary(global->global_dictionary(isolate_),
isolate_);
- DCHECK(cell->value().IsTheHole(isolate_));
+ DCHECK(cell->value(isolate_).IsTheHole(isolate_));
DCHECK(!value->IsTheHole(isolate_));
transition_ = cell;
// Assign an enumeration index to the property and update
@@ -645,7 +635,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
property_details_ =
PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
} else {
- property_details_ = transition->GetLastDescriptorDetails();
+ property_details_ = transition->GetLastDescriptorDetails(isolate_);
has_property_ = true;
}
}
@@ -656,13 +646,14 @@ void LookupIterator::ApplyTransitionToDataProperty(
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
holder_ = receiver;
- if (receiver->IsJSGlobalObject()) {
- JSObject::InvalidatePrototypeChains(receiver->map());
+ if (receiver->IsJSGlobalObject(isolate_)) {
+ JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
state_ = DATA;
return;
}
Handle<Map> transition = transition_map();
- bool simple_transition = transition->GetBackPointer() == receiver->map();
+ bool simple_transition =
+ transition->GetBackPointer(isolate_) == receiver->map(isolate_);
if (configuration_ == DEFAULT && !transition->is_dictionary_map() &&
!transition->IsPrototypeValidityCellValid()) {
@@ -673,21 +664,23 @@ void LookupIterator::ApplyTransitionToDataProperty(
transition->set_prototype_validity_cell(*validity_cell);
}
- if (!receiver->IsJSProxy()) {
- JSObject::MigrateToMap(Handle<JSObject>::cast(receiver), transition);
+ if (!receiver->IsJSProxy(isolate_)) {
+ JSObject::MigrateToMap(isolate_, Handle<JSObject>::cast(receiver),
+ transition);
}
if (simple_transition) {
int number = transition->LastAdded();
number_ = static_cast<uint32_t>(number);
- property_details_ = transition->GetLastDescriptorDetails();
+ property_details_ = transition->GetLastDescriptorDetails(isolate_);
state_ = DATA;
- } else if (receiver->map().is_dictionary_map()) {
- Handle<NameDictionary> dictionary(receiver->property_dictionary(),
+ } else if (receiver->map(isolate_).is_dictionary_map()) {
+ Handle<NameDictionary> dictionary(receiver->property_dictionary(isolate_),
isolate_);
int entry;
- if (receiver->map().is_prototype_map() && receiver->IsJSObject()) {
- JSObject::InvalidatePrototypeChains(receiver->map());
+ if (receiver->map(isolate_).is_prototype_map() &&
+ receiver->IsJSObject(isolate_)) {
+ JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
}
dictionary = NameDictionary::Add(isolate(), dictionary, name(),
isolate_->factory()->uninitialized_value(),
@@ -708,11 +701,11 @@ void LookupIterator::Delete() {
Handle<JSReceiver> holder = Handle<JSReceiver>::cast(holder_);
if (IsElement()) {
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- ElementsAccessor* accessor = object->GetElementsAccessor();
+ ElementsAccessor* accessor = object->GetElementsAccessor(isolate_);
accessor->Delete(object, number_);
} else {
- DCHECK(!name()->IsPrivateName());
- bool is_prototype_map = holder->map().is_prototype_map();
+ DCHECK(!name()->IsPrivateName(isolate_));
+ bool is_prototype_map = holder->map(isolate_).is_prototype_map();
RuntimeCallTimerScope stats_scope(
isolate_, is_prototype_map
? RuntimeCallCounterId::kPrototypeObject_DeleteProperty
@@ -721,13 +714,13 @@ void LookupIterator::Delete() {
PropertyNormalizationMode mode =
is_prototype_map ? KEEP_INOBJECT_PROPERTIES : CLEAR_INOBJECT_PROPERTIES;
- if (holder->HasFastProperties()) {
- JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
- "DeletingProperty");
+ if (holder->HasFastProperties(isolate_)) {
+ JSObject::NormalizeProperties(isolate_, Handle<JSObject>::cast(holder),
+ mode, 0, "DeletingProperty");
ReloadPropertyInformation<false>();
}
JSReceiver::DeleteNormalizedProperty(holder, number_);
- if (holder->IsJSObject()) {
+ if (holder->IsJSObject(isolate_)) {
JSObject::ReoptimizeIfPrototype(Handle<JSObject>::cast(holder));
}
}
@@ -742,12 +735,12 @@ void LookupIterator::TransitionToAccessorProperty(
// handled via a trap. Adding properties to primitive values is not
// observable.
Handle<JSObject> receiver = GetStoreTarget<JSObject>();
- if (!IsElement() && name()->IsPrivate()) {
+ if (!IsElement() && name()->IsPrivate(isolate_)) {
attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
}
- if (!IsElement() && !receiver->map().is_dictionary_map()) {
- Handle<Map> old_map(receiver->map(), isolate_);
+ if (!IsElement() && !receiver->map(isolate_).is_dictionary_map()) {
+ Handle<Map> old_map(receiver->map(isolate_), isolate_);
if (!holder_.is_identical_to(receiver)) {
holder_ = receiver;
@@ -760,13 +753,14 @@ void LookupIterator::TransitionToAccessorProperty(
Handle<Map> new_map = Map::TransitionToAccessorProperty(
isolate_, old_map, name_, descriptor, getter, setter, attributes);
- bool simple_transition = new_map->GetBackPointer() == receiver->map();
- JSObject::MigrateToMap(receiver, new_map);
+ bool simple_transition =
+ new_map->GetBackPointer(isolate_) == receiver->map(isolate_);
+ JSObject::MigrateToMap(isolate_, receiver, new_map);
if (simple_transition) {
int number = new_map->LastAdded();
number_ = static_cast<uint32_t>(number);
- property_details_ = new_map->GetLastDescriptorDetails();
+ property_details_ = new_map->GetLastDescriptorDetails(isolate_);
state_ = ACCESSOR;
return;
}
@@ -776,7 +770,7 @@ void LookupIterator::TransitionToAccessorProperty(
}
Handle<AccessorPair> pair;
- if (state() == ACCESSOR && GetAccessors()->IsAccessorPair()) {
+ if (state() == ACCESSOR && GetAccessors()->IsAccessorPair(isolate_)) {
pair = Handle<AccessorPair>::cast(GetAccessors());
// If the component and attributes are identical, nothing has to be done.
if (pair->Equals(*getter, *setter)) {
@@ -818,13 +812,14 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
receiver, details);
receiver->RequireSlowElements(*dictionary);
- if (receiver->HasSlowArgumentsElements()) {
- FixedArray parameter_map = FixedArray::cast(receiver->elements());
+ if (receiver->HasSlowArgumentsElements(isolate_)) {
+ FixedArray parameter_map = FixedArray::cast(receiver->elements(isolate_));
uint32_t length = parameter_map.length() - 2;
if (number_ < length) {
- parameter_map.set(number_ + 2, ReadOnlyRoots(heap()).the_hole_value());
+ parameter_map.set(number_ + 2,
+ ReadOnlyRoots(isolate_).the_hole_value());
}
- FixedArray::cast(receiver->elements()).set(1, *dictionary);
+ FixedArray::cast(receiver->elements(isolate_)).set(1, *dictionary);
} else {
receiver->set_elements(*dictionary);
}
@@ -832,13 +827,13 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
ReloadPropertyInformation<true>();
} else {
PropertyNormalizationMode mode = CLEAR_INOBJECT_PROPERTIES;
- if (receiver->map().is_prototype_map()) {
- JSObject::InvalidatePrototypeChains(receiver->map());
+ if (receiver->map(isolate_).is_prototype_map()) {
+ JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
mode = KEEP_INOBJECT_PROPERTIES;
}
// Normalize object to make this operation simple.
- JSObject::NormalizeProperties(receiver, mode, 0,
+ JSObject::NormalizeProperties(isolate_, receiver, mode, 0,
"TransitionToAccessorPair");
JSObject::SetNormalizedProperty(receiver, name_, pair, details);
@@ -859,61 +854,54 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
DCHECK(has_property_ || state_ == INTERCEPTOR || state_ == JSPROXY);
// Optimization that only works if configuration_ is not mutable.
if (!check_prototype_chain()) return true;
- DisallowHeapAllocation no_gc;
if (*receiver_ == *holder_) return true;
- if (!receiver_->IsJSReceiver()) return false;
- JSReceiver current = JSReceiver::cast(*receiver_);
- JSReceiver object = *holder_;
- if (!current.map().has_hidden_prototype()) return false;
- // JSProxy do not occur as hidden prototypes.
- if (object.IsJSProxy()) return false;
- PrototypeIterator iter(isolate(), current, kStartAtPrototype,
- PrototypeIterator::END_AT_NON_HIDDEN);
- while (!iter.IsAtEnd()) {
- if (iter.GetCurrent<JSReceiver>() == object) return true;
- iter.Advance();
- }
- return false;
+ if (!receiver_->IsJSGlobalProxy(isolate_)) return false;
+ return Handle<JSGlobalProxy>::cast(receiver_)->map(isolate_).prototype(
+ isolate_) == *holder_;
}
Handle<Object> LookupIterator::FetchValue() const {
Object result;
if (IsElement()) {
Handle<JSObject> holder = GetHolder<JSObject>();
- ElementsAccessor* accessor = holder->GetElementsAccessor();
+ ElementsAccessor* accessor = holder->GetElementsAccessor(isolate_);
return accessor->Get(holder, number_);
- } else if (holder_->IsJSGlobalObject()) {
+ } else if (holder_->IsJSGlobalObject(isolate_)) {
Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
- result = holder->global_dictionary().ValueAt(number_);
- } else if (!holder_->HasFastProperties()) {
- result = holder_->property_dictionary().ValueAt(number_);
+ result = holder->global_dictionary(isolate_).ValueAt(isolate_, number_);
+ } else if (!holder_->HasFastProperties(isolate_)) {
+ result = holder_->property_dictionary(isolate_).ValueAt(isolate_, number_);
} else if (property_details_.location() == kField) {
DCHECK_EQ(kData, property_details_.kind());
Handle<JSObject> holder = GetHolder<JSObject>();
- FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
+ FieldIndex field_index =
+ FieldIndex::ForDescriptor(holder->map(isolate_), number_);
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
} else {
- result = holder_->map().instance_descriptors().GetStrongValue(number_);
+ result =
+ holder_->map(isolate_).instance_descriptors(isolate_).GetStrongValue(
+ isolate_, number_);
}
return handle(result, isolate_);
}
bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
DCHECK(!IsElement());
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
Handle<JSObject> holder = GetHolder<JSObject>();
- FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
+ FieldIndex field_index =
+ FieldIndex::ForDescriptor(holder->map(isolate_), number_);
if (property_details_.representation().IsDouble()) {
- if (!value.IsNumber()) return false;
+ if (!value.IsNumber(isolate_)) return false;
uint64_t bits;
- if (holder->IsUnboxedDoubleField(field_index)) {
+ if (holder->IsUnboxedDoubleField(isolate_, field_index)) {
bits = holder->RawFastDoublePropertyAsBitsAt(field_index);
} else {
- Object current_value = holder->RawFastPropertyAt(field_index);
- DCHECK(current_value.IsMutableHeapNumber());
+ Object current_value = holder->RawFastPropertyAt(isolate_, field_index);
+ DCHECK(current_value.IsMutableHeapNumber(isolate_));
bits = MutableHeapNumber::cast(current_value).value_as_bits();
}
    // Use the bit representation of the double to check for hole double, since
@@ -927,11 +915,11 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
}
return Object::SameNumberValue(bit_cast<double>(bits), value.Number());
} else {
- Object current_value = holder->RawFastPropertyAt(field_index);
+ Object current_value = holder->RawFastPropertyAt(isolate_, field_index);
if (current_value.IsUninitialized(isolate()) || current_value == value) {
return true;
}
- return current_value.IsNumber() && value.IsNumber() &&
+ return current_value.IsNumber(isolate_) && value.IsNumber(isolate_) &&
Object::SameNumberValue(current_value.Number(), value.Number());
}
}
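
The constness check above compares doubles by bit pattern because the array hole is encoded as one particular NaN payload, and no NaN ever compares equal by value (not even to itself). A small sketch of why the raw bits are needed; kHoleNanBits below is an invented constant, not V8's actual kHoleNanInt64:

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Reinterpret a double's bits without violating strict aliasing
// (a stand-in for V8's bit_cast).
uint64_t DoubleToBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}

// Illustrative value only: the hole is marked by one specific NaN payload.
constexpr uint64_t kHoleNanBits = 0x7FF8000000000001ULL;

bool IsHolePattern(double d) { return DoubleToBits(d) == kHoleNanBits; }

int main() {
  double quiet_nan = std::nan("");
  // A value comparison cannot recognize the hole: NaN != NaN always holds,
  // so only the bit pattern distinguishes the hole from any other NaN.
  assert(quiet_nan != quiet_nan);

  double hole;
  std::memcpy(&hole, &kHoleNanBits, sizeof(hole));
  assert(IsHolePattern(hole));
  assert(!IsHolePattern(1.0));
}
```
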
@@ -946,7 +934,7 @@ int LookupIterator::GetFieldDescriptorIndex() const {
int LookupIterator::GetAccessorIndex() const {
DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kDescriptor, property_details_.location());
DCHECK_EQ(kAccessor, property_details_.kind());
return descriptor_number();
@@ -954,36 +942,38 @@ int LookupIterator::GetAccessorIndex() const {
Handle<Map> LookupIterator::GetFieldOwnerMap() const {
DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
DCHECK(!IsElement());
- Map holder_map = holder_->map();
+ Map holder_map = holder_->map(isolate_);
return handle(holder_map.FindFieldOwner(isolate(), descriptor_number()),
isolate_);
}
FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
DCHECK(!IsElement());
- return FieldIndex::ForDescriptor(holder_->map(), descriptor_number());
+ return FieldIndex::ForDescriptor(holder_->map(isolate_), descriptor_number());
}
Handle<FieldType> LookupIterator::GetFieldType() const {
DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
return handle(
- holder_->map().instance_descriptors().GetFieldType(descriptor_number()),
+ holder_->map(isolate_).instance_descriptors(isolate_).GetFieldType(
+ isolate_, descriptor_number()),
isolate_);
}
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
DCHECK(!IsElement());
Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
- return handle(holder->global_dictionary().CellAt(dictionary_entry()),
- isolate_);
+ return handle(
+ holder->global_dictionary(isolate_).CellAt(isolate_, dictionary_entry()),
+ isolate_);
}
Handle<Object> LookupIterator::GetAccessors() const {
@@ -1003,9 +993,9 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
if (IsElement()) {
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- ElementsAccessor* accessor = object->GetElementsAccessor();
+ ElementsAccessor* accessor = object->GetElementsAccessor(isolate_);
accessor->Set(object, number_, *value);
- } else if (holder->HasFastProperties()) {
+ } else if (holder->HasFastProperties(isolate_)) {
if (property_details_.location() == kField) {
      // Check that, in the case of a VariableMode::kConst field, the existing
      // value is equal to |value|.
@@ -1018,21 +1008,22 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
DCHECK_EQ(kDescriptor, property_details_.location());
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
}
- } else if (holder->IsJSGlobalObject()) {
+ } else if (holder->IsJSGlobalObject(isolate_)) {
GlobalDictionary dictionary =
- JSGlobalObject::cast(*holder).global_dictionary();
- dictionary.CellAt(dictionary_entry()).set_value(*value);
+ JSGlobalObject::cast(*holder).global_dictionary(isolate_);
+ dictionary.CellAt(isolate_, dictionary_entry()).set_value(*value);
} else {
- DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
- NameDictionary dictionary = holder->property_dictionary();
+ DCHECK_IMPLIES(holder->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
+ NameDictionary dictionary = holder->property_dictionary(isolate_);
dictionary.ValueAtPut(dictionary_entry(), *value);
}
}
template <bool is_element>
bool LookupIterator::SkipInterceptor(JSObject holder) {
- auto info = GetInterceptor<is_element>(holder);
- if (!is_element && name_->IsSymbol() && !info.can_intercept_symbols()) {
+ InterceptorInfo info = GetInterceptor<is_element>(isolate_, holder);
+ if (!is_element && name_->IsSymbol(isolate_) &&
+ !info.can_intercept_symbols()) {
return true;
}
if (info.non_masking()) {
@@ -1051,18 +1042,19 @@ bool LookupIterator::SkipInterceptor(JSObject holder) {
JSReceiver LookupIterator::NextHolder(Map map) {
DisallowHeapAllocation no_gc;
- if (map.prototype() == ReadOnlyRoots(heap()).null_value()) {
+ if (map.prototype(isolate_) == ReadOnlyRoots(isolate_).null_value()) {
return JSReceiver();
}
- if (!check_prototype_chain() && !map.has_hidden_prototype()) {
+ if (!check_prototype_chain() && !map.IsJSGlobalProxyMap()) {
return JSReceiver();
}
- return JSReceiver::cast(map.prototype());
+ return JSReceiver::cast(map.prototype(isolate_));
}
LookupIterator::State LookupIterator::NotFound(JSReceiver const holder) const {
DCHECK(!IsElement());
- if (!holder.IsJSTypedArray() || !name_->IsString()) return NOT_FOUND;
+ if (!holder.IsJSTypedArray(isolate_) || !name_->IsString(isolate_))
+ return NOT_FOUND;
return IsSpecialIndex(String::cast(*name_)) ? INTEGER_INDEXED_EXOTIC
: NOT_FOUND;
}
@@ -1084,27 +1076,27 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
switch (state_) {
case NOT_FOUND:
if (map.IsJSProxyMap()) {
- if (is_element || !name_->IsPrivate()) return JSPROXY;
+ if (is_element || !name_->IsPrivate(isolate_)) return JSPROXY;
}
if (map.is_access_check_needed()) {
- if (is_element || !name_->IsPrivate()) return ACCESS_CHECK;
+ if (is_element || !name_->IsPrivate(isolate_)) return ACCESS_CHECK;
}
V8_FALLTHROUGH;
case ACCESS_CHECK:
if (check_interceptor() && HasInterceptor<is_element>(map) &&
!SkipInterceptor<is_element>(JSObject::cast(holder))) {
- if (is_element || !name_->IsPrivate()) return INTERCEPTOR;
+ if (is_element || !name_->IsPrivate(isolate_)) return INTERCEPTOR;
}
V8_FALLTHROUGH;
case INTERCEPTOR:
if (!is_element && map.IsJSGlobalObjectMap()) {
GlobalDictionary dict =
- JSGlobalObject::cast(holder).global_dictionary();
+ JSGlobalObject::cast(holder).global_dictionary(isolate_);
int number = dict.FindEntry(isolate(), name_);
if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
number_ = static_cast<uint32_t>(number);
- PropertyCell cell = dict.CellAt(number_);
- if (cell.value().IsTheHole(isolate_)) return NOT_FOUND;
+ PropertyCell cell = dict.CellAt(isolate_, number_);
+ if (cell.value(isolate_).IsTheHole(isolate_)) return NOT_FOUND;
property_details_ = cell.property_details();
has_property_ = true;
switch (property_details_.kind()) {
@@ -1136,12 +1128,13 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
if (is_element) {
JSObject js_object = JSObject::cast(holder);
- ElementsAccessor* accessor = js_object.GetElementsAccessor();
- FixedArrayBase backing_store = js_object.elements();
+ ElementsAccessor* accessor = js_object.GetElementsAccessor(isolate_);
+ FixedArrayBase backing_store = js_object.elements(isolate_);
number_ =
accessor->GetEntryForIndex(isolate_, js_object, backing_store, index_);
if (number_ == kMaxUInt32) {
- return holder.IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
+ return holder.IsJSTypedArray(isolate_) ? INTEGER_INDEXED_EXOTIC
+ : NOT_FOUND;
}
property_details_ = accessor->GetDetails(js_object, number_);
if (map.has_frozen_or_sealed_elements()) {
@@ -1149,14 +1142,14 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
property_details_ = property_details_.CopyAddAttributes(attrs);
}
} else if (!map.is_dictionary_map()) {
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(isolate_);
int number = descriptors.SearchWithCache(isolate_, *name_, map);
if (number == DescriptorArray::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
property_details_ = descriptors.GetDetails(number_);
} else {
- DCHECK_IMPLIES(holder.IsJSProxy(), name()->IsPrivate());
- NameDictionary dict = holder.property_dictionary();
+ DCHECK_IMPLIES(holder.IsJSProxy(isolate_), name()->IsPrivate(isolate_));
+ NameDictionary dict = holder.property_dictionary(isolate_);
int number = dict.FindEntry(isolate(), name_);
if (number == NameDictionary::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
@@ -1191,15 +1184,15 @@ Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
bool LookupIterator::TryLookupCachedProperty() {
return state() == LookupIterator::ACCESSOR &&
- GetAccessors()->IsAccessorPair() && LookupCachedProperty();
+ GetAccessors()->IsAccessorPair(isolate_) && LookupCachedProperty();
}
bool LookupIterator::LookupCachedProperty() {
DCHECK_EQ(state(), LookupIterator::ACCESSOR);
- DCHECK(GetAccessors()->IsAccessorPair());
+ DCHECK(GetAccessors()->IsAccessorPair(isolate_));
AccessorPair accessor_pair = AccessorPair::cast(*GetAccessors());
- Handle<Object> getter(accessor_pair.getter(), isolate());
+ Handle<Object> getter(accessor_pair.getter(isolate_), isolate());
MaybeHandle<Name> maybe_name =
FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), getter);
if (maybe_name.is_null()) return false;
diff --git a/deps/v8/src/objects/lookup.h b/deps/v8/src/objects/lookup.h
index 820b8ef9b0..565ea4bb75 100644
--- a/deps/v8/src/objects/lookup.h
+++ b/deps/v8/src/objects/lookup.h
@@ -93,10 +93,6 @@ class V8_EXPORT_PRIVATE LookupIterator final {
Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
bool* success, Configuration configuration = DEFAULT);
- static LookupIterator ForTransitionHandler(
- Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, MaybeHandle<Map> maybe_transition_map);
-
void Restart() {
InterceptorState state = InterceptorState::kUninitialized;
IsElement() ? RestartInternal<true>(state) : RestartInternal<false>(state);
@@ -239,7 +235,8 @@ class V8_EXPORT_PRIVATE LookupIterator final {
template <bool is_element>
bool SkipInterceptor(JSObject holder);
template <bool is_element>
- static inline InterceptorInfo GetInterceptor(JSObject holder);
+ static inline InterceptorInfo GetInterceptor(Isolate* isolate,
+ JSObject holder);
bool check_interceptor() const {
return (configuration_ & kInterceptor) != 0;
@@ -247,7 +244,8 @@ class V8_EXPORT_PRIVATE LookupIterator final {
inline int descriptor_number() const;
inline int dictionary_entry() const;
- static inline Configuration ComputeConfiguration(Configuration configuration,
+ static inline Configuration ComputeConfiguration(Isolate* isolate,
+ Configuration configuration,
Handle<Name> name);
static Handle<JSReceiver> GetRootForNonJSReceiver(
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 8c26196fb5..6a9359e3a0 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -30,20 +30,13 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
CAST_ACCESSOR(Map)
-DescriptorArray Map::instance_descriptors() const {
- return DescriptorArray::cast(READ_FIELD(*this, kInstanceDescriptorsOffset));
+DEF_GETTER(Map, instance_descriptors, DescriptorArray) {
+ return TaggedField<DescriptorArray, kInstanceDescriptorsOffset>::load(isolate,
+ *this);
}
-DescriptorArray Map::synchronized_instance_descriptors() const {
- return DescriptorArray::cast(
- ACQUIRE_READ_FIELD(*this, kInstanceDescriptorsOffset));
-}
-
-void Map::set_synchronized_instance_descriptors(DescriptorArray value,
- WriteBarrierMode mode) {
- RELEASE_WRITE_FIELD(*this, kInstanceDescriptorsOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kInstanceDescriptorsOffset, value, mode);
-}
+SYNCHRONIZED_ACCESSORS(Map, synchronized_instance_descriptors, DescriptorArray,
+ kInstanceDescriptorsOffset)
// A freshly allocated layout descriptor can be set on an existing map.
// We need to use release-store and acquire-load accessor pairs to ensure
@@ -54,6 +47,12 @@ SYNCHRONIZED_ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
FLAG_unbox_double_fields)
WEAK_ACCESSORS(Map, raw_transitions, kTransitionsOrPrototypeInfoOffset)
+ACCESSORS_CHECKED2(Map, prototype, HeapObject, kPrototypeOffset, true,
+ value.IsNull() || value.IsJSReceiver())
+
+ACCESSORS_CHECKED(Map, prototype_info, Object,
+ kTransitionsOrPrototypeInfoOffset, this->is_prototype_map())
+
// |bit_field| fields.
// Concurrent access to |has_prototype_slot| and |has_non_instance_prototype|
// is explicitly whitelisted here. The former is never modified after the map
@@ -74,37 +73,35 @@ BIT_FIELD_ACCESSORS(Map, relaxed_bit_field, has_prototype_slot,
Map::HasPrototypeSlotBit)
// |bit_field2| fields.
-BIT_FIELD_ACCESSORS(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
-BIT_FIELD_ACCESSORS(Map, bit_field2, is_prototype_map, Map::IsPrototypeMapBit)
-BIT_FIELD_ACCESSORS(Map, bit_field2, has_hidden_prototype,
- Map::HasHiddenPrototypeBit)
+BIT_FIELD_ACCESSORS(Map, bit_field2, new_target_is_base,
+ Map::NewTargetIsBaseBit)
+BIT_FIELD_ACCESSORS(Map, bit_field2, is_immutable_proto,
+ Map::IsImmutablePrototypeBit)
// |bit_field3| fields.
BIT_FIELD_ACCESSORS(Map, bit_field3, owns_descriptors, Map::OwnsDescriptorsBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_in_retained_map_list,
Map::IsInRetainedMapListBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_prototype_map, Map::IsPrototypeMapBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_migration_target,
Map::IsMigrationTargetBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, is_immutable_proto,
- Map::IsImmutablePrototypeBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, new_target_is_base,
- Map::NewTargetIsBaseBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_extensible, Map::IsExtensibleBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, may_have_interesting_symbols,
Map::MayHaveInterestingSymbolsBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
Map::ConstructionCounterBits)
-InterceptorInfo Map::GetNamedInterceptor() {
+DEF_GETTER(Map, GetNamedInterceptor, InterceptorInfo) {
DCHECK(has_named_interceptor());
- FunctionTemplateInfo info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info.GetNamedPropertyHandler());
+ FunctionTemplateInfo info = GetFunctionTemplateInfo(isolate);
+ return InterceptorInfo::cast(info.GetNamedPropertyHandler(isolate));
}
-InterceptorInfo Map::GetIndexedInterceptor() {
+DEF_GETTER(Map, GetIndexedInterceptor, InterceptorInfo) {
DCHECK(has_indexed_interceptor());
- FunctionTemplateInfo info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info.GetIndexedPropertyHandler());
+ FunctionTemplateInfo info = GetFunctionTemplateInfo(isolate);
+ return InterceptorInfo::cast(info.GetIndexedPropertyHandler(isolate));
}
bool Map::IsMostGeneralFieldType(Representation representation,
@@ -113,7 +110,8 @@ bool Map::IsMostGeneralFieldType(Representation representation,
}
bool Map::CanHaveFastTransitionableElementsKind(InstanceType instance_type) {
- return instance_type == JS_ARRAY_TYPE || instance_type == JS_VALUE_TYPE ||
+ return instance_type == JS_ARRAY_TYPE ||
+ instance_type == JS_PRIMITIVE_WRAPPER_TYPE ||
instance_type == JS_ARGUMENTS_TYPE;
}
@@ -136,10 +134,25 @@ void Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
}
}
+Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
+ PropertyNormalizationMode mode, const char* reason) {
+ return Normalize(isolate, fast_map, fast_map->elements_kind(), mode, reason);
+}
+
+bool Map::EquivalentToForNormalization(const Map other,
+ PropertyNormalizationMode mode) const {
+ return EquivalentToForNormalization(other, elements_kind(), mode);
+}
+
bool Map::IsUnboxedDoubleField(FieldIndex index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return IsUnboxedDoubleField(isolate, index);
+}
+
+bool Map::IsUnboxedDoubleField(Isolate* isolate, FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
if (!index.is_inobject()) return false;
- return !layout_descriptor().IsTagged(index.property_index());
+ return !layout_descriptor(isolate).IsTagged(index.property_index());
}
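
A recurring change in this patch (DEF_GETTER, the extra isolate arguments on map()/prototype()/instance_descriptors(), and the GetIsolateForPtrCompr call just above) is that field getters now come in two flavors: one that derives the isolate from the object itself and one that takes it explicitly, so hot callers can pass the isolate once instead of recomputing the pointer-compression base per load. A rough standalone sketch of that shape, with made-up types:

```cpp
// Hypothetical stand-ins for V8's Isolate and GetIsolateForPtrCompr: with
// pointer compression, decompressing a tagged field needs the isolate
// (cage base), and deriving it from an object's address has a small cost.
struct Isolate {};
struct HeapObject;
Isolate* GetIsolateForPtrCompr(const HeapObject& object);

struct DescriptorArray {};

struct HeapObject {
  DescriptorArray descriptors_;

  // Explicit-isolate overload: callers that already hold the isolate
  // (e.g. LookupIterator's isolate_) pass it in and skip the derivation.
  DescriptorArray instance_descriptors(Isolate* /*isolate*/) const {
    return descriptors_;  // real code would decompress the field via the isolate
  }

  // Convenience overload matching the old no-argument getters: derive the
  // isolate from the object itself, then delegate.
  DescriptorArray instance_descriptors() const {
    return instance_descriptors(GetIsolateForPtrCompr(*this));
  }
};

Isolate* GetIsolateForPtrCompr(const HeapObject&) {
  static Isolate isolate;  // in V8 this is recovered from the object's address
  return &isolate;
}

int main() {
  HeapObject o;
  Isolate* isolate = GetIsolateForPtrCompr(o);
  o.instance_descriptors(isolate);  // hot-path form: isolate passed explicitly
  o.instance_descriptors();         // convenience form: isolate derived
}
```
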
bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
@@ -160,8 +173,8 @@ bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
}
}
-PropertyDetails Map::GetLastDescriptorDetails() const {
- return instance_descriptors().GetDetails(LastAdded());
+PropertyDetails Map::GetLastDescriptorDetails(Isolate* isolate) const {
+ return instance_descriptors(isolate).GetDetails(LastAdded());
}
int Map::LastAdded() const {
@@ -375,7 +388,7 @@ void Map::CopyUnusedPropertyFields(Map map) {
void Map::CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map map) {
int value = map.used_or_unused_instance_size_in_words();
- if (value >= JSValue::kFieldsAdded) {
+ if (value >= JSPrimitiveWrapper::kFieldsAdded) {
// Unused in-object fields. Adjust the offset from the object’s start
// so it matches the distance to the object’s end.
value += instance_size_in_words() - map.instance_size_in_words();
@@ -570,22 +583,13 @@ bool Map::IsPrimitiveMap() const {
return instance_type() <= LAST_PRIMITIVE_TYPE;
}
-HeapObject Map::prototype() const {
- return HeapObject::cast(READ_FIELD(*this, kPrototypeOffset));
-}
-
-void Map::set_prototype(HeapObject value, WriteBarrierMode mode) {
- DCHECK(value.IsNull() || value.IsJSReceiver());
- WRITE_FIELD(*this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kPrototypeOffset, value, mode);
-}
-
LayoutDescriptor Map::layout_descriptor_gc_safe() const {
DCHECK(FLAG_unbox_double_fields);
  // The loaded value can be dereferenced on a background thread to load the
// bitmap. We need acquire load in order to ensure that the bitmap
// initializing stores are also visible to the background thread.
- Object layout_desc = ACQUIRE_READ_FIELD(*this, kLayoutDescriptorOffset);
+ Object layout_desc =
+ TaggedField<Object, kLayoutDescriptorOffset>::Acquire_Load(*this);
return LayoutDescriptor::cast_gc_safe(layout_desc);
}
@@ -593,7 +597,8 @@ bool Map::HasFastPointerLayout() const {
DCHECK(FLAG_unbox_double_fields);
// The loaded value is used for SMI check only and is not dereferenced,
// so relaxed load is safe.
- Object layout_desc = RELAXED_READ_FIELD(*this, kLayoutDescriptorOffset);
+ Object layout_desc =
+ TaggedField<Object, kLayoutDescriptorOffset>::Relaxed_Load(*this);
return LayoutDescriptor::IsFastPointerLayout(layout_desc);
}
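
The two loads above encode different ordering requirements: layout_descriptor_gc_safe() must observe the bitmap stores made before the descriptor was published, so it uses an acquire load that pairs with the release store in the setter, while HasFastPointerLayout() only performs an SMI check on the value and can use a relaxed load. A minimal std::atomic sketch of that publish/observe pairing (illustrative; V8 uses its own TaggedField accessors rather than std::atomic):

```cpp
#include <atomic>
#include <cassert>
#include <thread>

struct LayoutBitmap {
  int words[4] = {0, 0, 0, 0};  // plays the role of the tagged/untagged bitmap
};

LayoutBitmap bitmap;
std::atomic<LayoutBitmap*> published{nullptr};

void MainThreadPublish() {
  bitmap.words[0] = 0b1010;                              // initializing stores...
  published.store(&bitmap, std::memory_order_release);   // ...then publish
}

void BackgroundThreadRead() {
  // Acquire pairs with the release store above: once the pointer is visible,
  // the bitmap contents it points to are visible as well.
  LayoutBitmap* p = published.load(std::memory_order_acquire);
  if (p != nullptr) assert(p->words[0] == 0b1010);
}

int main() {
  std::thread t(BackgroundThreadRead);
  MainThreadPublish();
  t.join();
}
```
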
@@ -686,36 +691,17 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
#endif
}
-HeapObject Map::GetBackPointer() const {
- Object object = constructor_or_backpointer();
- if (object.IsMap()) {
+DEF_GETTER(Map, GetBackPointer, HeapObject) {
+ Object object = constructor_or_backpointer(isolate);
+ if (object.IsMap(isolate)) {
return Map::cast(object);
}
- return GetReadOnlyRoots().undefined_value();
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ return GetReadOnlyRoots(isolate).undefined_value();
}
-Map Map::ElementsTransitionMap() {
- DisallowHeapAllocation no_gc;
- // TODO(delphick): While it's safe to pass nullptr for Isolate* here as
- // SearchSpecial doesn't need it, this is really ugly. Perhaps factor out a
- // base class for methods not requiring an Isolate?
- return TransitionsAccessor(nullptr, *this, &no_gc)
- .SearchSpecial(GetReadOnlyRoots().elements_transition_symbol());
-}
-
-Object Map::prototype_info() const {
- DCHECK(is_prototype_map());
- return READ_FIELD(*this, Map::kTransitionsOrPrototypeInfoOffset);
-}
-
-void Map::set_prototype_info(Object value, WriteBarrierMode mode) {
- CHECK(is_prototype_map());
- WRITE_FIELD(*this, Map::kTransitionsOrPrototypeInfoOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, Map::kTransitionsOrPrototypeInfoOffset,
- value, mode);
-}
-
-void Map::SetBackPointer(Object value, WriteBarrierMode mode) {
+void Map::SetBackPointer(HeapObject value, WriteBarrierMode mode) {
CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
CHECK(value.IsMap());
CHECK(GetBackPointer().IsUndefined());
@@ -724,6 +710,13 @@ void Map::SetBackPointer(Object value, WriteBarrierMode mode) {
set_constructor_or_backpointer(value, mode);
}
+// static
+Map Map::ElementsTransitionMap(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+ return TransitionsAccessor(isolate, *this, &no_gc)
+ .SearchSpecial(ReadOnlyRoots(isolate).elements_transition_symbol());
+}
+
ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, prototype_validity_cell, Object, kPrototypeValidityCellOffset)
ACCESSORS(Map, constructor_or_backpointer, Object,
@@ -736,23 +729,24 @@ bool Map::IsPrototypeValidityCellValid() const {
return value == Smi::FromInt(Map::kPrototypeChainValid);
}
-Object Map::GetConstructor() const {
- Object maybe_constructor = constructor_or_backpointer();
+DEF_GETTER(Map, GetConstructor, Object) {
+ Object maybe_constructor = constructor_or_backpointer(isolate);
// Follow any back pointers.
- while (maybe_constructor.IsMap()) {
+ while (maybe_constructor.IsMap(isolate)) {
maybe_constructor =
- Map::cast(maybe_constructor).constructor_or_backpointer();
+ Map::cast(maybe_constructor).constructor_or_backpointer(isolate);
}
return maybe_constructor;
}
-FunctionTemplateInfo Map::GetFunctionTemplateInfo() const {
- Object constructor = GetConstructor();
- if (constructor.IsJSFunction()) {
- DCHECK(JSFunction::cast(constructor).shared().IsApiFunction());
- return JSFunction::cast(constructor).shared().get_api_func_data();
+DEF_GETTER(Map, GetFunctionTemplateInfo, FunctionTemplateInfo) {
+ Object constructor = GetConstructor(isolate);
+ if (constructor.IsJSFunction(isolate)) {
+ // TODO(ishell): IsApiFunction(isolate) and get_api_func_data(isolate)
+ DCHECK(JSFunction::cast(constructor).shared(isolate).IsApiFunction());
+ return JSFunction::cast(constructor).shared(isolate).get_api_func_data();
}
- DCHECK(constructor.IsFunctionTemplateInfo());
+ DCHECK(constructor.IsFunctionTemplateInfo(isolate));
return FunctionTemplateInfo::cast(constructor);
}
@@ -805,8 +799,8 @@ int NormalizedMapCache::GetIndex(Handle<Map> map) {
return map->Hash() % NormalizedMapCache::kEntries;
}
-bool HeapObject::IsNormalizedMapCache() const {
- if (!IsWeakFixedArray()) return false;
+DEF_GETTER(HeapObject, IsNormalizedMapCache, bool) {
+ if (!IsWeakFixedArray(isolate)) return false;
if (WeakFixedArray::cast(*this).length() != NormalizedMapCache::kEntries) {
return false;
}
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index 855fdabdf3..d21f0e1a12 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -201,10 +201,9 @@ void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index,
*old_descriptors_ == integrity_source_map_->instance_descriptors());
}
-MapUpdater::State MapUpdater::CopyGeneralizeAllFields(const char* reason) {
- result_map_ = Map::CopyGeneralizeAllFields(
- isolate_, old_map_, new_elements_kind_, modified_descriptor_, new_kind_,
- new_attributes_, reason);
+MapUpdater::State MapUpdater::Normalize(const char* reason) {
+ result_map_ = Map::Normalize(isolate_, old_map_, new_elements_kind_,
+ CLEAR_INOBJECT_PROPERTIES, reason);
state_ = kEnd;
return state_; // Done.
}
@@ -310,14 +309,14 @@ MapUpdater::State MapUpdater::FindRootMap() {
}
if (!old_map_->EquivalentToForTransition(*root_map_)) {
- return CopyGeneralizeAllFields("GenAll_NotEquivalent");
+ return Normalize("Normalize_NotEquivalent");
} else if (old_map_->is_extensible() != root_map_->is_extensible()) {
DCHECK(!old_map_->is_extensible());
DCHECK(root_map_->is_extensible());
// We have an integrity level transition in the tree, let us make a note
// of that transition to be able to replay it later.
if (!TrySaveIntegrityLevelTransitions()) {
- return CopyGeneralizeAllFields("GenAll_PrivateSymbolsOnNonExtensible");
+ return Normalize("Normalize_PrivateSymbolsOnNonExtensible");
}
// We want to build transitions to the original element kind (before
@@ -335,7 +334,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
to_kind != SLOW_SLOPPY_ARGUMENTS_ELEMENTS &&
!(IsTransitionableFastElementsKind(from_kind) &&
IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
- return CopyGeneralizeAllFields("GenAll_InvalidElementsTransition");
+ return Normalize("Normalize_InvalidElementsTransition");
}
int root_nof = root_map_->NumberOfOwnDescriptors();
@@ -344,13 +343,13 @@ MapUpdater::State MapUpdater::FindRootMap() {
old_descriptors_->GetDetails(modified_descriptor_);
if (old_details.kind() != new_kind_ ||
old_details.attributes() != new_attributes_) {
- return CopyGeneralizeAllFields("GenAll_RootModification1");
+ return Normalize("Normalize_RootModification1");
}
if (old_details.location() != kField) {
- return CopyGeneralizeAllFields("GenAll_RootModification2");
+ return Normalize("Normalize_RootModification2");
}
if (!new_representation_.fits_into(old_details.representation())) {
- return CopyGeneralizeAllFields("GenAll_RootModification4");
+ return Normalize("Normalize_RootModification4");
}
DCHECK_EQ(kData, old_details.kind());
@@ -394,7 +393,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
!EqualImmutableValues(GetValue(i),
tmp_descriptors->GetStrongValue(i))) {
// TODO(ishell): mutable accessors are not implemented yet.
- return CopyGeneralizeAllFields("GenAll_Incompatible");
+ return Normalize("Normalize_Incompatible");
}
if (!IsGeneralizableTo(old_details.location(), tmp_details.location())) {
break;
@@ -484,7 +483,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (old_details.kind() == kAccessor &&
!EqualImmutableValues(GetValue(i),
tmp_descriptors->GetStrongValue(i))) {
- return CopyGeneralizeAllFields("GenAll_Incompatible");
+ return Normalize("Normalize_Incompatible");
}
DCHECK(!tmp_map->is_deprecated());
target_map_ = tmp_map;
@@ -723,7 +722,7 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
  // contains an entry for the given descriptor. This means that the transition
  // could be inserted regardless of whether the transitions array is full or not.
if (maybe_transition.is_null() && !transitions.CanHaveMoreTransitions()) {
- return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
+ return Normalize("Normalize_CantHaveMoreTransitions");
}
old_map_->NotifyLeafMapLayoutChange(isolate_);
@@ -787,7 +786,7 @@ MapUpdater::State MapUpdater::ConstructNewMapWithIntegrityLevelTransition() {
TransitionsAccessor transitions(isolate_, target_map_);
if (!transitions.CanHaveMoreTransitions()) {
- return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
+ return Normalize("Normalize_CantHaveMoreTransitions");
}
result_map_ = Map::CopyForPreventExtensions(
diff --git a/deps/v8/src/objects/map-updater.h b/deps/v8/src/objects/map-updater.h
index 3ba86eacbc..6ee373cbdf 100644
--- a/deps/v8/src/objects/map-updater.h
+++ b/deps/v8/src/objects/map-updater.h
@@ -123,9 +123,8 @@ class MapUpdater {
State ConstructNewMapWithIntegrityLevelTransition();
  // When a requested reconfiguration cannot be done, the result is a copy
- // of |old_map_| where every field has |Tagged| representation and |Any|
- // field type. This map is disconnected from the transition tree.
- State CopyGeneralizeAllFields(const char* reason);
+ // of |old_map_| in dictionary mode.
+ State Normalize(const char* reason);
// Returns name of a |descriptor| property.
inline Name GetKey(int descriptor) const;
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 43d8c305c5..7b4f1abd05 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -85,6 +85,21 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
os << "]\n";
}
+Map Map::GetStructMap(Isolate* isolate, InstanceType type) {
+ Map map;
+ switch (type) {
+#define MAKE_CASE(TYPE, Name, name) \
+ case TYPE: \
+ map = ReadOnlyRoots(isolate).name##_map(); \
+ break;
+ STRUCT_LIST(MAKE_CASE)
+#undef MAKE_CASE
+ default:
+ UNREACHABLE();
+ }
+ return map;
+}
+
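GetStructMap above is generated with V8's STRUCT_LIST X-macro, which expands MAKE_CASE once per struct type so the switch stays in sync with the type list. A tiny self-contained sketch of the same technique (the list and names here are invented):

```cpp
#include <cstdio>

// Hypothetical type list; V8's STRUCT_LIST works the same way but covers
// every internal struct type.
#define DEMO_STRUCT_LIST(V)        \
  V(TUPLE2_TYPE, Tuple2, tuple2)   \
  V(SCRIPT_TYPE, Script, script)

enum InstanceType { TUPLE2_TYPE, SCRIPT_TYPE };

const char* MapNameFor(InstanceType type) {
  switch (type) {
#define MAKE_CASE(TYPE, Name, name) \
  case TYPE:                        \
    return #name "_map";
    DEMO_STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", MapNameFor(SCRIPT_TYPE));  // prints "script_map"
}
```
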
VisitorId Map::GetVisitorId(Map map) {
STATIC_ASSERT(kVisitorIdCount <= 256);
@@ -262,7 +277,7 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_TYPE:
@@ -337,12 +352,20 @@ VisitorId Map::GetVisitorId(Map map) {
if (instance_type == WASM_CAPI_FUNCTION_DATA_TYPE) {
return kVisitWasmCapiFunctionData;
}
+ if (instance_type == WASM_INDIRECT_FUNCTION_TABLE_TYPE) {
+ return kVisitWasmIndirectFunctionTable;
+ }
return kVisitStruct;
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
return kVisitDataHandler;
+ case SOURCE_TEXT_MODULE_TYPE:
+ return kVisitSourceTextModule;
+ case SYNTHETIC_MODULE_TYPE:
+ return kVisitSyntheticModule;
+
default:
UNREACHABLE();
}
@@ -458,7 +481,7 @@ MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
return MaybeHandle<Map>();
}
- Representation representation = constant->OptimalRepresentation();
+ Representation representation = constant->OptimalRepresentation(isolate);
Handle<FieldType> type = constant->OptimalType(isolate, representation);
return CopyWithField(isolate, map, name, type, attributes,
PropertyConstness::kConst, representation, flag);
@@ -570,61 +593,6 @@ bool Map::HasOutOfObjectProperties() const {
return GetInObjectProperties() < NumberOfFields();
}
-Handle<Map> Map::CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
- ElementsKind elements_kind,
- int modify_index, PropertyKind kind,
- PropertyAttributes attributes,
- const char* reason) {
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> descriptors = DescriptorArray::CopyUpTo(
- isolate, old_descriptors, number_of_own_descriptors);
- descriptors->GeneralizeAllFields();
-
- Handle<LayoutDescriptor> new_layout_descriptor(
- LayoutDescriptor::FastPointerLayout(), isolate);
- Handle<Map> new_map = CopyReplaceDescriptors(
- isolate, map, descriptors, new_layout_descriptor, OMIT_TRANSITION,
- MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
-
- // Unless the instance is being migrated, ensure that modify_index is a field.
- if (modify_index >= 0) {
- PropertyDetails details = descriptors->GetDetails(modify_index);
- if (details.constness() != PropertyConstness::kMutable ||
- details.location() != kField || details.attributes() != attributes) {
- int field_index = details.location() == kField
- ? details.field_index()
- : new_map->NumberOfFields();
- Descriptor d = Descriptor::DataField(
- isolate, handle(descriptors->GetKey(modify_index), isolate),
- field_index, attributes, Representation::Tagged());
- descriptors->Replace(modify_index, &d);
- if (details.location() != kField) {
- new_map->AccountAddedPropertyField();
- }
- } else {
- DCHECK(details.attributes() == attributes);
- }
-
- if (FLAG_trace_generalization) {
- MaybeHandle<FieldType> field_type = FieldType::None(isolate);
- if (details.location() == kField) {
- field_type = handle(
- map->instance_descriptors().GetFieldType(modify_index), isolate);
- }
- map->PrintGeneralization(
- isolate, stdout, reason, modify_index,
- new_map->NumberOfOwnDescriptors(), new_map->NumberOfOwnDescriptors(),
- details.location() == kDescriptor, details.representation(),
- Representation::Tagged(), details.constness(), details.constness(),
- field_type, MaybeHandle<Object>(), FieldType::Any(isolate),
- MaybeHandle<Object>());
- }
- }
- new_map->set_elements_kind(elements_kind);
- return new_map;
-}
-
void Map::DeprecateTransitionTree(Isolate* isolate) {
if (is_deprecated()) return;
DisallowHeapAllocation no_gc;
@@ -648,7 +616,8 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
LayoutDescriptor new_layout_descriptor) {
// Don't overwrite the empty descriptor array or initial map's descriptors.
- if (NumberOfOwnDescriptors() == 0 || GetBackPointer().IsUndefined(isolate)) {
+ if (NumberOfOwnDescriptors() == 0 ||
+ GetBackPointer(isolate).IsUndefined(isolate)) {
return;
}
@@ -659,8 +628,8 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
Map current = *this;
MarkingBarrierForDescriptorArray(isolate->heap(), current, to_replace,
to_replace.number_of_descriptors());
- while (current.instance_descriptors() == to_replace) {
- Object next = current.GetBackPointer();
+ while (current.instance_descriptors(isolate) == to_replace) {
+ Object next = current.GetBackPointer(isolate);
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
current.SetEnumLength(kInvalidEnumCacheSentinel);
current.UpdateDescriptors(isolate, new_descriptors, new_layout_descriptor,
@@ -673,7 +642,7 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
Map Map::FindRootMap(Isolate* isolate) const {
Map result = *this;
while (true) {
- Object back = result.GetBackPointer();
+ Object back = result.GetBackPointer(isolate);
if (back.IsUndefined(isolate)) {
// Initial map always owns descriptors and doesn't have unused entries
// in the descriptor array.
@@ -688,10 +657,11 @@ Map Map::FindRootMap(Isolate* isolate) const {
Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
DisallowHeapAllocation no_allocation;
- DCHECK_EQ(kField, instance_descriptors().GetDetails(descriptor).location());
+ DCHECK_EQ(kField,
+ instance_descriptors(isolate).GetDetails(descriptor).location());
Map result = *this;
while (true) {
- Object back = result.GetBackPointer();
+ Object back = result.GetBackPointer(isolate);
if (back.IsUndefined(isolate)) break;
const Map parent = Map::cast(back);
if (parent.NumberOfOwnDescriptors() <= descriptor) break;
@@ -927,7 +897,7 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// Figure out the most restrictive integrity level transition (it should
// be the last one in the transition tree).
DCHECK(!map.is_extensible());
- Map previous = Map::cast(map.GetBackPointer());
+ Map previous = Map::cast(map.GetBackPointer(isolate));
TransitionsAccessor last_transitions(isolate, previous, no_allocation);
if (!last_transitions.HasIntegrityLevelTransitionTo(
map, &(info.integrity_level_symbol), &(info.integrity_level))) {
@@ -945,7 +915,7 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// transitions. If we encounter any non-integrity level transition interleaved
// with integrity level transitions, just bail out.
while (!source_map.is_extensible()) {
- previous = Map::cast(source_map.GetBackPointer());
+ previous = Map::cast(source_map.GetBackPointer(isolate));
TransitionsAccessor transitions(isolate, previous, no_allocation);
if (!transitions.HasIntegrityLevelTransitionTo(source_map)) {
return info;
@@ -1234,9 +1204,9 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
// Starting from the next existing elements kind transition try to
// replay the property transitions that do not involve instance rewriting
// (ElementsTransitionAndStoreStub does not support that).
- for (root_map = root_map.ElementsTransitionMap();
+ for (root_map = root_map.ElementsTransitionMap(isolate);
!root_map.is_null() && root_map.has_fast_elements();
- root_map = root_map.ElementsTransitionMap()) {
+ root_map = root_map.ElementsTransitionMap(isolate)) {
// If root_map's elements kind doesn't match any of the elements kind in
// the candidates there is no need to do any additional work.
if (!HasElementsKind(candidates, root_map.elements_kind())) continue;
@@ -1263,7 +1233,7 @@ static Map FindClosestElementsTransition(Isolate* isolate, Map map,
ElementsKind kind = map.elements_kind();
while (kind != to_kind) {
- Map next_map = current_map.ElementsTransitionMap();
+ Map next_map = current_map.ElementsTransitionMap(isolate);
if (next_map.is_null()) return current_map;
kind = next_map.elements_kind();
current_map = next_map;
@@ -1401,25 +1371,23 @@ int Map::NumberOfEnumerableProperties() const {
}
int Map::NextFreePropertyIndex() const {
- int free_index = 0;
int number_of_own_descriptors = NumberOfOwnDescriptors();
DescriptorArray descs = instance_descriptors();
- for (int i = 0; i < number_of_own_descriptors; i++) {
+ // Search properties backwards to find the last field.
+ for (int i = number_of_own_descriptors - 1; i >= 0; --i) {
PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
- int candidate = details.field_index() + details.field_width_in_words();
- if (candidate > free_index) free_index = candidate;
+ return details.field_index() + details.field_width_in_words();
}
}
- return free_index;
+ return 0;
}
bool Map::OnlyHasSimpleProperties() const {
// Wrapped string elements aren't explicitly stored in the elements backing
// store, but are loaded indirectly from the underlying string.
return !IsStringWrapperElementsKind(elements_kind()) &&
- !IsSpecialReceiverMap() && !has_hidden_prototype() &&
- !is_dictionary_map();
+ !IsSpecialReceiverMap() && !is_dictionary_map();
}
bool Map::DictionaryElementsInPrototypeChainOnly(Isolate* isolate) {
@@ -1478,6 +1446,7 @@ Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
}
Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
+ ElementsKind new_elements_kind,
PropertyNormalizationMode mode, const char* reason) {
DCHECK(!fast_map->is_dictionary_map());
@@ -1489,7 +1458,8 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
if (use_cache) cache = Handle<NormalizedMapCache>::cast(maybe_cache);
Handle<Map> new_map;
- if (use_cache && cache->Get(fast_map, mode).ToHandle(&new_map)) {
+ if (use_cache &&
+ cache->Get(fast_map, new_elements_kind, mode).ToHandle(&new_map)) {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) new_map->DictionaryMapVerify(isolate);
#endif
@@ -1499,6 +1469,7 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
// except for the code cache, which can contain some ICs which can be
// applied to the shared map, dependent code and weak cell cache.
Handle<Map> fresh = Map::CopyNormalized(isolate, fast_map, mode);
+ fresh->set_elements_kind(new_elements_kind);
STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
Map::kDependentCodeOffset + kTaggedSize);
@@ -1508,8 +1479,12 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
// The IsInRetainedMapListBit might be different if the {new_map}
// that we got from the {cache} was already embedded into optimized
// code somewhere.
- DCHECK_EQ(fresh->bit_field3() & ~IsInRetainedMapListBit::kMask,
- new_map->bit_field3() & ~IsInRetainedMapListBit::kMask);
+ // The IsMigrationTargetBit might be different if the {new_map} from
+ // {cache} has already been marked as a migration target.
+ constexpr int ignored_bit_field3_bits =
+ IsInRetainedMapListBit::kMask | IsMigrationTargetBit::kMask;
+ DCHECK_EQ(fresh->bit_field3() & ~ignored_bit_field3_bits,
+ new_map->bit_field3() & ~ignored_bit_field3_bits);
int offset = Map::kBitField3Offset + kInt32Size;
DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address() + offset),
reinterpret_cast<void*>(new_map->address() + offset),
@@ -1530,13 +1505,14 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
#endif
} else {
new_map = Map::CopyNormalized(isolate, fast_map, mode);
+ new_map->set_elements_kind(new_elements_kind);
if (use_cache) {
cache->Set(fast_map, new_map);
isolate->counters()->maps_normalized()->Increment();
}
- if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Normalize", *fast_map, *new_map, reason));
- }
+ }
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("Normalize", *fast_map, *new_map, reason));
}
fast_map->NotifyLeafMapLayoutChange(isolate);
return new_map;
@@ -1870,7 +1846,7 @@ Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
DCHECK_EQ(map->FindRootMap(isolate).NumberOfOwnDescriptors(),
map->NumberOfOwnDescriptors());
- maybe_elements_transition_map = map->ElementsTransitionMap();
+ maybe_elements_transition_map = map->ElementsTransitionMap(isolate);
DCHECK(
maybe_elements_transition_map.is_null() ||
(maybe_elements_transition_map.elements_kind() == DICTIONARY_ELEMENTS &&
@@ -2093,7 +2069,7 @@ Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
PropertyAttributes attributes =
map->instance_descriptors().GetDetails(descriptor).attributes();
- Representation representation = value->OptimalRepresentation();
+ Representation representation = value->OptimalRepresentation(isolate);
Handle<FieldType> type = value->OptimalType(isolate, representation);
MapUpdater mu(isolate, map);
@@ -2108,11 +2084,11 @@ Handle<Map> Map::PrepareForDataProperty(Isolate* isolate, Handle<Map> map,
int descriptor,
PropertyConstness constness,
Handle<Object> value) {
+ // Update to the newest map before storing the property.
+ map = Update(isolate, map);
// Dictionaries can store any property value.
DCHECK(!map->is_dictionary_map());
- // Update to the newest map before storing the property.
- return UpdateDescriptorForValue(isolate, Update(isolate, map), descriptor,
- constness, value);
+ return UpdateDescriptorForValue(isolate, map, descriptor, constness, value);
}
Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
@@ -2152,7 +2128,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
isolate->bootstrapper()->IsActive() ? OMIT_TRANSITION : INSERT_TRANSITION;
MaybeHandle<Map> maybe_map;
if (!map->TooManyFastProperties(store_origin)) {
- Representation representation = value->OptimalRepresentation();
+ Representation representation = value->OptimalRepresentation(isolate);
Handle<FieldType> type = value->OptimalType(isolate, representation);
maybe_map = Map::CopyWithField(isolate, map, name, type, attributes,
constness, representation, flag);
@@ -2204,16 +2180,16 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
int descriptor, PropertyKind kind,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ PropertyConstness constness) {
// Dictionaries have to be reconfigured in-place.
DCHECK(!map->is_dictionary_map());
if (!map->GetBackPointer().IsMap()) {
// There is no benefit from reconstructing transition tree for maps without
- // back pointers.
- return CopyGeneralizeAllFields(isolate, map, map->elements_kind(),
- descriptor, kind, attributes,
- "GenAll_AttributesMismatchProtoMap");
+ // back pointers, normalize and try to hit the map cache instead.
+ return Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES,
+ "Normalize_AttributesMismatchProtoMap");
}
if (FLAG_trace_generalization) {
@@ -2223,7 +2199,7 @@ Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
MapUpdater mu(isolate, map);
DCHECK_EQ(kData, kind); // Only kData case is supported so far.
Handle<Map> new_map = mu.ReconfigureToDataField(
- descriptor, attributes, PropertyConstness::kConst, Representation::None(),
+ descriptor, attributes, constness, Representation::None(),
FieldType::None(isolate));
return new_map;
}
@@ -2243,12 +2219,12 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
DCHECK(!getter->IsNull(isolate) || !setter->IsNull(isolate));
DCHECK(name->IsUniqueName());
- // Dictionary maps can always have additional data properties.
- if (map->is_dictionary_map()) return map;
-
// Migrate to the newest map before transitioning to the new property.
map = Update(isolate, map);
+ // Dictionary maps can always have additional data properties.
+ if (map->is_dictionary_map()) return map;
+
PropertyNormalizationMode mode = map->is_prototype_map()
? KEEP_INOBJECT_PROPERTIES
: CLEAR_INOBJECT_PROPERTIES;
@@ -2433,8 +2409,7 @@ bool CheckEquivalent(const Map first, const Map second) {
first.instance_type() == second.instance_type() &&
first.bit_field() == second.bit_field() &&
first.is_extensible() == second.is_extensible() &&
- first.new_target_is_base() == second.new_target_is_base() &&
- first.has_hidden_prototype() == second.has_hidden_prototype();
+ first.new_target_is_base() == second.new_target_is_base();
}
} // namespace
@@ -2442,7 +2417,6 @@ bool CheckEquivalent(const Map first, const Map second) {
bool Map::EquivalentToForTransition(const Map other) const {
CHECK_EQ(GetConstructor(), other.GetConstructor());
CHECK_EQ(instance_type(), other.instance_type());
- CHECK_EQ(has_hidden_prototype(), other.has_hidden_prototype());
if (bit_field() != other.bit_field()) return false;
if (new_target_is_base() != other.new_target_is_base()) return false;
@@ -2477,10 +2451,16 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const {
}
bool Map::EquivalentToForNormalization(const Map other,
+ ElementsKind elements_kind,
PropertyNormalizationMode mode) const {
int properties =
mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other.GetInObjectProperties();
- return CheckEquivalent(*this, other) && bit_field2() == other.bit_field2() &&
+ // Make sure the elements_kind bits are in bit_field2.
+ DCHECK_EQ(this->elements_kind(), Map::ElementsKindBits::decode(bit_field2()));
+ int adjusted_other_bit_field2 =
+ Map::ElementsKindBits::update(other.bit_field2(), elements_kind);
+ return CheckEquivalent(*this, other) &&
+ bit_field2() == adjusted_other_bit_field2 &&
GetInObjectProperties() == properties &&
JSObject::GetEmbedderFieldCount(*this) ==
JSObject::GetEmbedderFieldCount(other);
@@ -2639,7 +2619,6 @@ void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
} else {
DCHECK(prototype->IsNull(isolate) || prototype->IsJSProxy());
}
- map->set_has_hidden_prototype(prototype->IsJSGlobalObject());
WriteBarrierMode wb_mode =
prototype->IsNull(isolate) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
@@ -2672,6 +2651,7 @@ Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) {
}
MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
+ ElementsKind elements_kind,
PropertyNormalizationMode mode) {
DisallowHeapAllocation no_gc;
MaybeObject value = WeakFixedArray::Get(GetIndex(fast_map));
@@ -2681,7 +2661,8 @@ MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
}
Map normalized_map = Map::cast(heap_object);
- if (!normalized_map.EquivalentToForNormalization(*fast_map, mode)) {
+ if (!normalized_map.EquivalentToForNormalization(*fast_map, elements_kind,
+ mode)) {
return MaybeHandle<Map>();
}
return handle(normalized_map, GetIsolate());
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 814f8ed3be..c9da19b3e3 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -65,13 +65,16 @@ enum InstanceType : uint16_t;
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
V(Struct) \
V(Symbol) \
+ V(SyntheticModule) \
V(ThinString) \
V(TransitionArray) \
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
V(WasmCapiFunctionData) \
+ V(WasmIndirectFunctionTable) \
V(WasmInstanceObject) \
V(WeakArray) \
V(WeakCell)
@@ -138,22 +141,22 @@ using MapHandles = std::vector<Handle<Map>>;
// | | - has_prototype_slot (bit 7) |
// +----------+---------------------------------------------+
// | Byte | [bit_field2] |
-// | | - is_extensible (bit 0) |
-// | | - is_prototype_map (bit 1) |
-// | | - has_hidden_prototype (bit 2) |
+// | | - new_target_is_base (bit 0) |
+// | | - is_immutable_proto (bit 1) |
+// | | - unused bit (bit 2) |
// | | - elements_kind (bits 3..7) |
// +----+----------+---------------------------------------------+
// | Int | [bit_field3] |
// | | - enum_length (bit 0..9) |
// | | - number_of_own_descriptors (bit 10..19) |
-// | | - is_dictionary_map (bit 20) |
-// | | - owns_descriptors (bit 21) |
-// | | - is_in_retained_map_list (bit 22) |
-// | | - is_deprecated (bit 23) |
-// | | - is_unstable (bit 24) |
-// | | - is_migration_target (bit 25) |
-// | | - is_immutable_proto (bit 26) |
-// | | - new_target_is_base (bit 27) |
+// | | - is_prototype_map (bit 20) |
+// | | - is_dictionary_map (bit 21) |
+// | | - owns_descriptors (bit 22) |
+// | | - is_in_retained_map_list (bit 23) |
+// | | - is_deprecated (bit 24) |
+// | | - is_unstable (bit 25) |
+// | | - is_migration_target (bit 26) |
+// | | - is_extensible (bit 27) |
// | | - may_have_interesting_symbols (bit 28) |
// | | - construction_counter (bit 29..31) |
// | | |
@@ -212,8 +215,8 @@ class Map : public HeapObject {
Handle<Map> map, Handle<Context> native_context);
// Retrieve interceptors.
- inline InterceptorInfo GetNamedInterceptor();
- inline InterceptorInfo GetIndexedInterceptor();
+ DECL_GETTER(GetNamedInterceptor, InterceptorInfo)
+ DECL_GETTER(GetIndexedInterceptor, InterceptorInfo)
// Instance type.
DECL_PRIMITIVE_ACCESSORS(instance_type, InstanceType)
@@ -265,10 +268,10 @@ class Map : public HeapObject {
DECL_PRIMITIVE_ACCESSORS(bit_field2, byte)
// Bit positions for |bit_field2|.
-#define MAP_BIT_FIELD2_FIELDS(V, _) \
- V(IsExtensibleBit, bool, 1, _) \
- V(IsPrototypeMapBit, bool, 1, _) \
- V(HasHiddenPrototypeBit, bool, 1, _) \
+#define MAP_BIT_FIELD2_FIELDS(V, _) \
+ V(NewTargetIsBaseBit, bool, 1, _) \
+ V(IsImmutablePrototypeBit, bool, 1, _) \
+ V(UnusedBit, bool, 1, _) \
V(ElementsKindBits, ElementsKind, 5, _)
DEFINE_BIT_FIELDS(MAP_BIT_FIELD2_FIELDS)
@@ -287,14 +290,14 @@ class Map : public HeapObject {
#define MAP_BIT_FIELD3_FIELDS(V, _) \
V(EnumLengthBits, int, kDescriptorIndexBitCount, _) \
V(NumberOfOwnDescriptorsBits, int, kDescriptorIndexBitCount, _) \
+ V(IsPrototypeMapBit, bool, 1, _) \
V(IsDictionaryMapBit, bool, 1, _) \
V(OwnsDescriptorsBit, bool, 1, _) \
V(IsInRetainedMapListBit, bool, 1, _) \
V(IsDeprecatedBit, bool, 1, _) \
V(IsUnstableBit, bool, 1, _) \
V(IsMigrationTargetBit, bool, 1, _) \
- V(IsImmutablePrototypeBit, bool, 1, _) \
- V(NewTargetIsBaseBit, bool, 1, _) \
+ V(IsExtensibleBit, bool, 1, _) \
V(MayHaveInterestingSymbolsBit, bool, 1, _) \
V(ConstructionCounterBits, int, 3, _)
@@ -378,9 +381,6 @@ class Map : public HeapObject {
DECL_BOOLEAN_ACCESSORS(has_prototype_slot)
- // Tells whether the instance with this map has a hidden prototype.
- DECL_BOOLEAN_ACCESSORS(has_hidden_prototype)
-
// Records and queries whether the instance has a named interceptor.
DECL_BOOLEAN_ACCESSORS(has_named_interceptor)
@@ -431,7 +431,7 @@ class Map : public HeapObject {
// map with DICTIONARY_ELEMENTS was found in the prototype chain.
bool DictionaryElementsInPrototypeChainOnly(Isolate* isolate);
- inline Map ElementsTransitionMap();
+ inline Map ElementsTransitionMap(Isolate* isolate);
inline FixedArrayBase GetInitialElements() const;
@@ -545,9 +545,14 @@ class Map : public HeapObject {
V8_EXPORT_PRIVATE static Handle<Map> Normalize(Isolate* isolate,
Handle<Map> map,
+ ElementsKind new_elements_kind,
PropertyNormalizationMode mode,
const char* reason);
+ inline static Handle<Map> Normalize(Isolate* isolate, Handle<Map> fast_map,
+ PropertyNormalizationMode mode,
+ const char* reason);
+
// Tells whether the map is used for JSObjects in dictionary mode (ie
// normalized objects, ie objects for which HasFastProperties returns false).
// A map can never be used for both dictionary mode and fast mode JSObjects.
@@ -573,19 +578,18 @@ class Map : public HeapObject {
// Returns null_value if there's neither a constructor function nor a
// FunctionTemplateInfo available.
DECL_ACCESSORS(constructor_or_backpointer, Object)
- inline Object GetConstructor() const;
- inline FunctionTemplateInfo GetFunctionTemplateInfo() const;
+ DECL_GETTER(GetConstructor, Object)
+ DECL_GETTER(GetFunctionTemplateInfo, FunctionTemplateInfo)
inline void SetConstructor(Object constructor,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [back pointer]: points back to the parent map from which a transition
// leads to this map. The field overlaps with the constructor (see above).
- inline HeapObject GetBackPointer() const;
- inline void SetBackPointer(Object value,
+ DECL_GETTER(GetBackPointer, HeapObject)
+ inline void SetBackPointer(HeapObject value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [instance descriptors]: describes the object.
- inline DescriptorArray instance_descriptors() const;
- inline DescriptorArray synchronized_instance_descriptors() const;
+ DECL_GETTER(instance_descriptors, DescriptorArray)
V8_EXPORT_PRIVATE void SetInstanceDescriptors(Isolate* isolate,
DescriptorArray descriptors,
int number_of_own_descriptors);
@@ -629,7 +633,7 @@ class Map : public HeapObject {
// chain state.
inline bool IsPrototypeValidityCellValid() const;
- inline PropertyDetails GetLastDescriptorDetails() const;
+ inline PropertyDetails GetLastDescriptorDetails(Isolate* isolate) const;
inline int LastAdded() const;
@@ -742,7 +746,7 @@ class Map : public HeapObject {
PropertyAttributes attributes);
V8_EXPORT_PRIVATE static Handle<Map> ReconfigureExistingProperty(
Isolate* isolate, Handle<Map> map, int descriptor, PropertyKind kind,
- PropertyAttributes attributes);
+ PropertyAttributes attributes, PropertyConstness constness);
inline void AppendDescriptor(Isolate* isolate, Descriptor* desc);
@@ -794,6 +798,8 @@ class Map : public HeapObject {
inline bool CanTransition() const;
+ static Map GetStructMap(Isolate* isolate, InstanceType type);
+
#define DECL_TESTER(Type, ...) inline bool Is##Type##Map() const;
INSTANCE_TYPE_CHECKERS(DECL_TESTER)
#undef DECL_TESTER
@@ -836,15 +842,19 @@ class Map : public HeapObject {
class BodyDescriptor;
- // Compares this map to another to see if they describe equivalent objects.
+ // Compares this map to another to see if they describe equivalent objects,
+ // up to the given |elements_kind|.
// If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
// it had exactly zero inobject properties.
// The "shared" flags of both this map and |other| are ignored.
- bool EquivalentToForNormalization(const Map other,
+ bool EquivalentToForNormalization(const Map other, ElementsKind elements_kind,
PropertyNormalizationMode mode) const;
+ inline bool EquivalentToForNormalization(
+ const Map other, PropertyNormalizationMode mode) const;
// Returns true if given field is unboxed double.
inline bool IsUnboxedDoubleField(FieldIndex index) const;
+ inline bool IsUnboxedDoubleField(Isolate* isolate, FieldIndex index) const;
void PrintMapDetails(std::ostream& os);
@@ -932,14 +942,6 @@ class Map : public HeapObject {
static Handle<Map> CopyNormalized(Isolate* isolate, Handle<Map> map,
PropertyNormalizationMode mode);
- // TODO(ishell): Move to MapUpdater.
- static Handle<Map> CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
- ElementsKind elements_kind,
- int modify_index,
- PropertyKind kind,
- PropertyAttributes attributes,
- const char* reason);
-
void DeprecateTransitionTree(Isolate* isolate);
void ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
@@ -966,13 +968,13 @@ class Map : public HeapObject {
MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value);
// Use the high-level instance_descriptors/SetInstanceDescriptors instead.
- inline void set_synchronized_instance_descriptors(
- DescriptorArray array, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ DECL_ACCESSORS(synchronized_instance_descriptors, DescriptorArray)
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
friend class MapUpdater;
+ friend class ConcurrentMarkingVisitor;
OBJECT_CONSTRUCTORS(Map, HeapObject);
};
@@ -986,6 +988,7 @@ class NormalizedMapCache : public WeakFixedArray {
static Handle<NormalizedMapCache> New(Isolate* isolate);
V8_WARN_UNUSED_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
+ ElementsKind elements_kind,
PropertyNormalizationMode mode);
void Set(Handle<Map> fast_map, Handle<Map> normalized_map);
@@ -993,7 +996,7 @@ class NormalizedMapCache : public WeakFixedArray {
DECL_VERIFIER(NormalizedMapCache)
private:
- friend bool HeapObject::IsNormalizedMapCache() const;
+ friend bool HeapObject::IsNormalizedMapCache(Isolate* isolate) const;
static const int kEntries = 64;
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index a1645c0604..304cf90d28 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -30,6 +30,10 @@ class MaybeObject : public TaggedImpl<HeapObjectReferenceType::WEAK, Address> {
#ifdef VERIFY_HEAP
static void VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p);
#endif
+
+ private:
+ template <typename TFieldType, int kFieldOffset>
+ friend class TaggedField;
};
// A HeapObjectReference is either a strong reference to a HeapObject, a weak
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index a3bc31b63a..1ab9b9fb04 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -6,9 +6,12 @@
#define V8_OBJECTS_MODULE_INL_H_
#include "src/objects/module.h"
+#include "src/objects/source-text-module.h"
+#include "src/objects/synthetic-module.h"
#include "src/objects/objects-inl.h" // Needed for write barriers
#include "src/objects/scope-info.h"
+#include "src/objects/string-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,74 +19,86 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(Module, Struct)
-OBJECT_CONSTRUCTORS_IMPL(ModuleInfoEntry, Struct)
+OBJECT_CONSTRUCTORS_IMPL(Module, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(SourceTextModule, Module)
+OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfoEntry, Struct)
+OBJECT_CONSTRUCTORS_IMPL(SyntheticModule, Module)
OBJECT_CONSTRUCTORS_IMPL(JSModuleNamespace, JSObject)
NEVER_READ_ONLY_SPACE_IMPL(Module)
+NEVER_READ_ONLY_SPACE_IMPL(SourceTextModule)
+NEVER_READ_ONLY_SPACE_IMPL(SyntheticModule)
CAST_ACCESSOR(Module)
-ACCESSORS(Module, code, Object, kCodeOffset)
+CAST_ACCESSOR(SourceTextModule)
+CAST_ACCESSOR(SyntheticModule)
ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
-ACCESSORS(Module, regular_exports, FixedArray, kRegularExportsOffset)
-ACCESSORS(Module, regular_imports, FixedArray, kRegularImportsOffset)
ACCESSORS(Module, module_namespace, HeapObject, kModuleNamespaceOffset)
-ACCESSORS(Module, requested_modules, FixedArray, kRequestedModulesOffset)
-ACCESSORS(Module, script, Script, kScriptOffset)
ACCESSORS(Module, exception, Object, kExceptionOffset)
-ACCESSORS(Module, import_meta, Object, kImportMetaOffset)
SMI_ACCESSORS(Module, status, kStatusOffset)
-SMI_ACCESSORS(Module, dfs_index, kDfsIndexOffset)
-SMI_ACCESSORS(Module, dfs_ancestor_index, kDfsAncestorIndexOffset)
SMI_ACCESSORS(Module, hash, kHashOffset)
-ModuleInfo Module::info() const {
+ACCESSORS(SourceTextModule, code, Object, kCodeOffset)
+ACCESSORS(SourceTextModule, regular_exports, FixedArray, kRegularExportsOffset)
+ACCESSORS(SourceTextModule, regular_imports, FixedArray, kRegularImportsOffset)
+ACCESSORS(SourceTextModule, requested_modules, FixedArray,
+ kRequestedModulesOffset)
+ACCESSORS(SourceTextModule, script, Script, kScriptOffset)
+ACCESSORS(SourceTextModule, import_meta, Object, kImportMetaOffset)
+SMI_ACCESSORS(SourceTextModule, dfs_index, kDfsIndexOffset)
+SMI_ACCESSORS(SourceTextModule, dfs_ancestor_index, kDfsAncestorIndexOffset)
+
+ACCESSORS(SyntheticModule, name, String, kNameOffset)
+ACCESSORS(SyntheticModule, export_names, FixedArray, kExportNamesOffset)
+ACCESSORS(SyntheticModule, evaluation_steps, Foreign, kEvaluationStepsOffset)
+
+SourceTextModuleInfo SourceTextModule::info() const {
return (status() >= kEvaluating)
- ? ModuleInfo::cast(code())
+ ? SourceTextModuleInfo::cast(code())
: GetSharedFunctionInfo().scope_info().ModuleDescriptorInfo();
}
CAST_ACCESSOR(JSModuleNamespace)
ACCESSORS(JSModuleNamespace, module, Module, kModuleOffset)
-CAST_ACCESSOR(ModuleInfoEntry)
-ACCESSORS(ModuleInfoEntry, export_name, Object, kExportNameOffset)
-ACCESSORS(ModuleInfoEntry, local_name, Object, kLocalNameOffset)
-ACCESSORS(ModuleInfoEntry, import_name, Object, kImportNameOffset)
-SMI_ACCESSORS(ModuleInfoEntry, module_request, kModuleRequestOffset)
-SMI_ACCESSORS(ModuleInfoEntry, cell_index, kCellIndexOffset)
-SMI_ACCESSORS(ModuleInfoEntry, beg_pos, kBegPosOffset)
-SMI_ACCESSORS(ModuleInfoEntry, end_pos, kEndPosOffset)
+CAST_ACCESSOR(SourceTextModuleInfoEntry)
+ACCESSORS(SourceTextModuleInfoEntry, export_name, Object, kExportNameOffset)
+ACCESSORS(SourceTextModuleInfoEntry, local_name, Object, kLocalNameOffset)
+ACCESSORS(SourceTextModuleInfoEntry, import_name, Object, kImportNameOffset)
+SMI_ACCESSORS(SourceTextModuleInfoEntry, module_request, kModuleRequestOffset)
+SMI_ACCESSORS(SourceTextModuleInfoEntry, cell_index, kCellIndexOffset)
+SMI_ACCESSORS(SourceTextModuleInfoEntry, beg_pos, kBegPosOffset)
+SMI_ACCESSORS(SourceTextModuleInfoEntry, end_pos, kEndPosOffset)
-OBJECT_CONSTRUCTORS_IMPL(ModuleInfo, FixedArray)
-CAST_ACCESSOR(ModuleInfo)
+OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfo, FixedArray)
+CAST_ACCESSOR(SourceTextModuleInfo)
-FixedArray ModuleInfo::module_requests() const {
+FixedArray SourceTextModuleInfo::module_requests() const {
return FixedArray::cast(get(kModuleRequestsIndex));
}
-FixedArray ModuleInfo::special_exports() const {
+FixedArray SourceTextModuleInfo::special_exports() const {
return FixedArray::cast(get(kSpecialExportsIndex));
}
-FixedArray ModuleInfo::regular_exports() const {
+FixedArray SourceTextModuleInfo::regular_exports() const {
return FixedArray::cast(get(kRegularExportsIndex));
}
-FixedArray ModuleInfo::regular_imports() const {
+FixedArray SourceTextModuleInfo::regular_imports() const {
return FixedArray::cast(get(kRegularImportsIndex));
}
-FixedArray ModuleInfo::namespace_imports() const {
+FixedArray SourceTextModuleInfo::namespace_imports() const {
return FixedArray::cast(get(kNamespaceImportsIndex));
}
-FixedArray ModuleInfo::module_request_positions() const {
+FixedArray SourceTextModuleInfo::module_request_positions() const {
return FixedArray::cast(get(kModuleRequestPositionsIndex));
}
#ifdef DEBUG
-bool ModuleInfo::Equals(ModuleInfo other) const {
+bool SourceTextModuleInfo::Equals(SourceTextModuleInfo other) const {
return regular_exports() == other.regular_exports() &&
regular_imports() == other.regular_imports() &&
special_exports() == other.special_exports() &&
@@ -93,6 +108,30 @@ bool ModuleInfo::Equals(ModuleInfo other) const {
}
#endif
+struct ModuleHandleHash {
+ V8_INLINE size_t operator()(Handle<Module> module) const {
+ return module->hash();
+ }
+};
+
+struct ModuleHandleEqual {
+ V8_INLINE bool operator()(Handle<Module> lhs, Handle<Module> rhs) const {
+ return *lhs == *rhs;
+ }
+};
+
+class UnorderedModuleSet
+ : public std::unordered_set<Handle<Module>, ModuleHandleHash,
+ ModuleHandleEqual,
+ ZoneAllocator<Handle<Module>>> {
+ public:
+ explicit UnorderedModuleSet(Zone* zone)
+ : std::unordered_set<Handle<Module>, ModuleHandleHash, ModuleHandleEqual,
+ ZoneAllocator<Handle<Module>>>(
+ 2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
+ ZoneAllocator<Handle<Module>>(zone)) {}
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index ea40989df1..4e89050360 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -14,169 +14,25 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
-#include "src/utils/ostreams.h"
#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
-struct ModuleHandleHash {
- V8_INLINE size_t operator()(Handle<Module> module) const {
- return module->hash();
- }
-};
-
-struct ModuleHandleEqual {
- V8_INLINE bool operator()(Handle<Module> lhs, Handle<Module> rhs) const {
- return *lhs == *rhs;
- }
-};
-
-struct StringHandleHash {
- V8_INLINE size_t operator()(Handle<String> string) const {
- return string->Hash();
- }
-};
-
-struct StringHandleEqual {
- V8_INLINE bool operator()(Handle<String> lhs, Handle<String> rhs) const {
- return lhs->Equals(*rhs);
- }
-};
-
-class UnorderedStringSet
- : public std::unordered_set<Handle<String>, StringHandleHash,
- StringHandleEqual,
- ZoneAllocator<Handle<String>>> {
- public:
- explicit UnorderedStringSet(Zone* zone)
- : std::unordered_set<Handle<String>, StringHandleHash, StringHandleEqual,
- ZoneAllocator<Handle<String>>>(
- 2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
- ZoneAllocator<Handle<String>>(zone)) {}
-};
-
-class UnorderedModuleSet
- : public std::unordered_set<Handle<Module>, ModuleHandleHash,
- ModuleHandleEqual,
- ZoneAllocator<Handle<Module>>> {
- public:
- explicit UnorderedModuleSet(Zone* zone)
- : std::unordered_set<Handle<Module>, ModuleHandleHash, ModuleHandleEqual,
- ZoneAllocator<Handle<Module>>>(
- 2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
- ZoneAllocator<Handle<Module>>(zone)) {}
-};
-
-class UnorderedStringMap
- : public std::unordered_map<
- Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
- ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>> {
- public:
- explicit UnorderedStringMap(Zone* zone)
- : std::unordered_map<
- Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
- ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>>(
- 2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
- ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>(
- zone)) {}
-};
-
-class Module::ResolveSet
- : public std::unordered_map<
- Handle<Module>, UnorderedStringSet*, ModuleHandleHash,
- ModuleHandleEqual,
- ZoneAllocator<std::pair<const Handle<Module>, UnorderedStringSet*>>> {
- public:
- explicit ResolveSet(Zone* zone)
- : std::unordered_map<Handle<Module>, UnorderedStringSet*,
- ModuleHandleHash, ModuleHandleEqual,
- ZoneAllocator<std::pair<const Handle<Module>,
- UnorderedStringSet*>>>(
- 2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
- ZoneAllocator<std::pair<const Handle<Module>, UnorderedStringSet*>>(
- zone)),
- zone_(zone) {}
-
- Zone* zone() const { return zone_; }
-
- private:
- Zone* zone_;
-};
-
-int Module::ExportIndex(int cell_index) {
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kExport);
- return cell_index - 1;
-}
-
-int Module::ImportIndex(int cell_index) {
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kImport);
- return -cell_index - 1;
-}
-
-void Module::CreateIndirectExport(Isolate* isolate, Handle<Module> module,
- Handle<String> name,
- Handle<ModuleInfoEntry> entry) {
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(name).IsTheHole(isolate));
- exports = ObjectHashTable::Put(exports, name, entry);
- module->set_exports(*exports);
-}
-
-void Module::CreateExport(Isolate* isolate, Handle<Module> module,
- int cell_index, Handle<FixedArray> names) {
- DCHECK_LT(0, names->length());
- Handle<Cell> cell =
- isolate->factory()->NewCell(isolate->factory()->undefined_value());
- module->regular_exports().set(ExportIndex(cell_index), *cell);
-
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- for (int i = 0, n = names->length(); i < n; ++i) {
- Handle<String> name(String::cast(names->get(i)), isolate);
- DCHECK(exports->Lookup(name).IsTheHole(isolate));
- exports = ObjectHashTable::Put(exports, name, cell);
- }
- module->set_exports(*exports);
-}
-
-Cell Module::GetCell(int cell_index) {
- DisallowHeapAllocation no_gc;
- Object cell;
- switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
- case ModuleDescriptor::kImport:
- cell = regular_imports().get(ImportIndex(cell_index));
- break;
- case ModuleDescriptor::kExport:
- cell = regular_exports().get(ExportIndex(cell_index));
- break;
- case ModuleDescriptor::kInvalid:
- UNREACHABLE();
- }
- return Cell::cast(cell);
-}
-
-Handle<Object> Module::LoadVariable(Isolate* isolate, Handle<Module> module,
- int cell_index) {
- return handle(module->GetCell(cell_index).value(), isolate);
-}
-
-void Module::StoreVariable(Handle<Module> module, int cell_index,
- Handle<Object> value) {
- DisallowHeapAllocation no_gc;
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kExport);
- module->GetCell(cell_index).set_value(*value);
-}
-
#ifdef DEBUG
void Module::PrintStatusTransition(Status new_status) {
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Changing module status from " << status() << " to " << new_status
<< " for ";
- script().GetNameOrSourceURL().Print(os);
+ if (this->IsSourceTextModule()) {
+ Handle<Script> script(SourceTextModule::cast(*this).script(),
+ GetIsolate());
+ script->GetNameOrSourceURL().Print(os);
+ } else {
+ SyntheticModule::cast(*this).name().Print(os);
+ }
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -194,70 +50,80 @@ void Module::SetStatus(Status new_status) {
set_status(new_status);
}
+void Module::RecordError(Isolate* isolate) {
+ DisallowHeapAllocation no_alloc;
+ DCHECK(exception().IsTheHole(isolate));
+ Object the_exception = isolate->pending_exception();
+ DCHECK(!the_exception.IsTheHole(isolate));
+
+ if (this->IsSourceTextModule()) {
+ Handle<SourceTextModule> self(SourceTextModule::cast(*this), GetIsolate());
+ self->set_code(self->info());
+ }
+#ifdef DEBUG
+ PrintStatusTransition(Module::kErrored);
+#endif // DEBUG
+ set_status(Module::kErrored);
+ set_exception(the_exception);
+}
+
void Module::ResetGraph(Isolate* isolate, Handle<Module> module) {
DCHECK_NE(module->status(), kInstantiating);
DCHECK_NE(module->status(), kEvaluating);
if (module->status() != kPreInstantiating) return;
- Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+
+ Handle<FixedArray> requested_modules =
+ module->IsSourceTextModule()
+ ? Handle<FixedArray>(
+ Handle<SourceTextModule>::cast(module)->requested_modules(),
+ isolate)
+ : Handle<FixedArray>();
Reset(isolate, module);
- for (int i = 0; i < requested_modules->length(); ++i) {
- Handle<Object> descendant(requested_modules->get(i), isolate);
- if (descendant->IsModule()) {
- ResetGraph(isolate, Handle<Module>::cast(descendant));
- } else {
- DCHECK(descendant->IsUndefined(isolate));
+ if (module->IsSourceTextModule()) {
+ for (int i = 0; i < requested_modules->length(); ++i) {
+ Handle<Object> descendant(requested_modules->get(i), isolate);
+ if (descendant->IsModule()) {
+ ResetGraph(isolate, Handle<Module>::cast(descendant));
+ } else {
+ DCHECK(descendant->IsUndefined(isolate));
+ }
}
+ } else {
+ DCHECK(module->IsSyntheticModule());
+ // Nothing else to do here.
}
}
void Module::Reset(Isolate* isolate, Handle<Module> module) {
- Factory* factory = isolate->factory();
-
DCHECK(module->status() == kPreInstantiating ||
module->status() == kInstantiating);
DCHECK(module->exception().IsTheHole(isolate));
- DCHECK(module->import_meta().IsTheHole(isolate));
// The namespace object cannot exist, because it would have been created
// by RunInitializationCode, which is called only after this module's SCC
// succeeds instantiation.
DCHECK(!module->module_namespace().IsJSModuleNamespace());
- Handle<ObjectHashTable> exports =
- ObjectHashTable::New(isolate, module->info().RegularExportCount());
- Handle<FixedArray> regular_exports =
- factory->NewFixedArray(module->regular_exports().length());
- Handle<FixedArray> regular_imports =
- factory->NewFixedArray(module->regular_imports().length());
- Handle<FixedArray> requested_modules =
- factory->NewFixedArray(module->requested_modules().length());
-
- if (module->status() == kInstantiating) {
- module->set_code(JSFunction::cast(module->code()).shared());
- }
#ifdef DEBUG
module->PrintStatusTransition(kUninstantiated);
#endif // DEBUG
- module->set_status(kUninstantiated);
- module->set_exports(*exports);
- module->set_regular_exports(*regular_exports);
- module->set_regular_imports(*regular_imports);
- module->set_requested_modules(*requested_modules);
- module->set_dfs_index(-1);
- module->set_dfs_ancestor_index(-1);
-}
-void Module::RecordError(Isolate* isolate) {
- DisallowHeapAllocation no_alloc;
- DCHECK(exception().IsTheHole(isolate));
- Object the_exception = isolate->pending_exception();
- DCHECK(!the_exception.IsTheHole(isolate));
+ int export_count;
- set_code(info());
-#ifdef DEBUG
- PrintStatusTransition(Module::kErrored);
-#endif // DEBUG
- set_status(Module::kErrored);
- set_exception(the_exception);
+ if (module->IsSourceTextModule()) {
+ Handle<SourceTextModule> source_text_module =
+ Handle<SourceTextModule>::cast(module);
+ export_count = source_text_module->regular_exports().length();
+ SourceTextModule::Reset(isolate, source_text_module);
+ } else {
+ export_count =
+ Handle<SyntheticModule>::cast(module)->export_names().length();
+ // Nothing to do here.
+ }
+
+ Handle<ObjectHashTable> exports = ObjectHashTable::New(isolate, export_count);
+
+ module->set_exports(*exports);
+ module->set_status(kUninstantiated);
}
Object Module::GetException() {
@@ -267,46 +133,6 @@ Object Module::GetException() {
return exception();
}
-SharedFunctionInfo Module::GetSharedFunctionInfo() const {
- DisallowHeapAllocation no_alloc;
- DCHECK_NE(status(), Module::kEvaluating);
- DCHECK_NE(status(), Module::kEvaluated);
- switch (status()) {
- case kUninstantiated:
- case kPreInstantiating:
- DCHECK(code().IsSharedFunctionInfo());
- return SharedFunctionInfo::cast(code());
- case kInstantiating:
- DCHECK(code().IsJSFunction());
- return JSFunction::cast(code()).shared();
- case kInstantiated:
- DCHECK(code().IsJSGeneratorObject());
- return JSGeneratorObject::cast(code()).function().shared();
- case kEvaluating:
- case kEvaluated:
- case kErrored:
- UNREACHABLE();
- }
-
- UNREACHABLE();
-}
-
-MaybeHandle<Cell> Module::ResolveImport(Isolate* isolate, Handle<Module> module,
- Handle<String> name, int module_request,
- MessageLocation loc, bool must_resolve,
- Module::ResolveSet* resolve_set) {
- Handle<Module> requested_module(
- Module::cast(module->requested_modules().get(module_request)), isolate);
- Handle<String> specifier(
- String::cast(module->info().module_requests().get(module_request)),
- isolate);
- MaybeHandle<Cell> result =
- Module::ResolveExport(isolate, requested_module, specifier, name, loc,
- must_resolve, resolve_set);
- DCHECK_IMPLIES(isolate->has_pending_exception(), result.is_null());
- return result;
-}
-
MaybeHandle<Cell> Module::ResolveExport(Isolate* isolate, Handle<Module> module,
Handle<String> module_specifier,
Handle<String> export_name,
@@ -314,121 +140,16 @@ MaybeHandle<Cell> Module::ResolveExport(Isolate* isolate, Handle<Module> module,
Module::ResolveSet* resolve_set) {
DCHECK_GE(module->status(), kPreInstantiating);
DCHECK_NE(module->status(), kEvaluating);
- Handle<Object> object(module->exports().Lookup(export_name), isolate);
- if (object->IsCell()) {
- // Already resolved (e.g. because it's a local export).
- return Handle<Cell>::cast(object);
- }
- // Check for cycle before recursing.
- {
- // Attempt insertion with a null string set.
- auto result = resolve_set->insert({module, nullptr});
- UnorderedStringSet*& name_set = result.first->second;
- if (result.second) {
- // |module| wasn't in the map previously, so allocate a new name set.
- Zone* zone = resolve_set->zone();
- name_set =
- new (zone->New(sizeof(UnorderedStringSet))) UnorderedStringSet(zone);
- } else if (name_set->count(export_name)) {
- // Cycle detected.
- if (must_resolve) {
- return isolate->Throw<Cell>(
- isolate->factory()->NewSyntaxError(
- MessageTemplate::kCyclicModuleDependency, export_name,
- module_specifier),
- &loc);
- }
- return MaybeHandle<Cell>();
- }
- name_set->insert(export_name);
+ if (module->IsSourceTextModule()) {
+ return SourceTextModule::ResolveExport(
+ isolate, Handle<SourceTextModule>::cast(module), module_specifier,
+ export_name, loc, must_resolve, resolve_set);
+ } else {
+ return SyntheticModule::ResolveExport(
+ isolate, Handle<SyntheticModule>::cast(module), module_specifier,
+ export_name, loc, must_resolve);
}
-
- if (object->IsModuleInfoEntry()) {
- // Not yet resolved indirect export.
- Handle<ModuleInfoEntry> entry = Handle<ModuleInfoEntry>::cast(object);
- Handle<String> import_name(String::cast(entry->import_name()), isolate);
- Handle<Script> script(module->script(), isolate);
- MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
-
- Handle<Cell> cell;
- if (!ResolveImport(isolate, module, import_name, entry->module_request(),
- new_loc, true, resolve_set)
- .ToHandle(&cell)) {
- DCHECK(isolate->has_pending_exception());
- return MaybeHandle<Cell>();
- }
-
- // The export table may have changed but the entry in question should be
- // unchanged.
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(export_name).IsModuleInfoEntry());
-
- exports = ObjectHashTable::Put(exports, export_name, cell);
- module->set_exports(*exports);
- return cell;
- }
-
- DCHECK(object->IsTheHole(isolate));
- return Module::ResolveExportUsingStarExports(isolate, module,
- module_specifier, export_name,
- loc, must_resolve, resolve_set);
-}
-
-MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
- Isolate* isolate, Handle<Module> module, Handle<String> module_specifier,
- Handle<String> export_name, MessageLocation loc, bool must_resolve,
- Module::ResolveSet* resolve_set) {
- if (!export_name->Equals(ReadOnlyRoots(isolate).default_string())) {
- // Go through all star exports looking for the given name. If multiple star
- // exports provide the name, make sure they all map it to the same cell.
- Handle<Cell> unique_cell;
- Handle<FixedArray> special_exports(module->info().special_exports(),
- isolate);
- for (int i = 0, n = special_exports->length(); i < n; ++i) {
- i::Handle<i::ModuleInfoEntry> entry(
- i::ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- if (!entry->export_name().IsUndefined(isolate)) {
- continue; // Indirect export.
- }
-
- Handle<Script> script(module->script(), isolate);
- MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
-
- Handle<Cell> cell;
- if (ResolveImport(isolate, module, export_name, entry->module_request(),
- new_loc, false, resolve_set)
- .ToHandle(&cell)) {
- if (unique_cell.is_null()) unique_cell = cell;
- if (*unique_cell != *cell) {
- return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
- MessageTemplate::kAmbiguousExport,
- module_specifier, export_name),
- &loc);
- }
- } else if (isolate->has_pending_exception()) {
- return MaybeHandle<Cell>();
- }
- }
-
- if (!unique_cell.is_null()) {
- // Found a unique star export for this name.
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(export_name).IsTheHole(isolate));
- exports = ObjectHashTable::Put(exports, export_name, unique_cell);
- module->set_exports(*exports);
- return unique_cell;
- }
- }
-
- // Unresolvable.
- if (must_resolve) {
- return isolate->Throw<Cell>(
- isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
- module_specifier, export_name),
- &loc);
- }
- return MaybeHandle<Cell>();
}
bool Module::Instantiate(Isolate* isolate, Handle<Module> module,
@@ -438,7 +159,14 @@ bool Module::Instantiate(Isolate* isolate, Handle<Module> module,
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Instantiating module ";
- module->script().GetNameOrSourceURL().Print(os);
+ if (module->IsSourceTextModule()) {
+ Handle<SourceTextModule>::cast(module)
+ ->script()
+ .GetNameOrSourceURL()
+ .Print(os);
+ } else {
+ Handle<SyntheticModule>::cast(module)->name().Print(os);
+ }
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -450,7 +178,7 @@ bool Module::Instantiate(Isolate* isolate, Handle<Module> module,
return false;
}
Zone zone(isolate->allocator(), ZONE_NAME);
- ZoneForwardList<Handle<Module>> stack(&zone);
+ ZoneForwardList<Handle<SourceTextModule>> stack(&zone);
unsigned dfs_index = 0;
if (!FinishInstantiate(isolate, module, &stack, &dfs_index, &zone)) {
for (auto& descendant : stack) {
@@ -474,188 +202,31 @@ bool Module::PrepareInstantiate(Isolate* isolate, Handle<Module> module,
module->SetStatus(kPreInstantiating);
STACK_CHECK(isolate, false);
- // Obtain requested modules.
- Handle<ModuleInfo> module_info(module->info(), isolate);
- Handle<FixedArray> module_requests(module_info->module_requests(), isolate);
- Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
- for (int i = 0, length = module_requests->length(); i < length; ++i) {
- Handle<String> specifier(String::cast(module_requests->get(i)), isolate);
- v8::Local<v8::Module> api_requested_module;
- if (!callback(context, v8::Utils::ToLocal(specifier),
- v8::Utils::ToLocal(module))
- .ToLocal(&api_requested_module)) {
- isolate->PromoteScheduledException();
- return false;
- }
- Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
- requested_modules->set(i, *requested_module);
- }
-
- // Recurse.
- for (int i = 0, length = requested_modules->length(); i < length; ++i) {
- Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
- isolate);
- if (!PrepareInstantiate(isolate, requested_module, context, callback)) {
- return false;
- }
- }
-
- // Set up local exports.
- // TODO(neis): Create regular_exports array here instead of in factory method?
- for (int i = 0, n = module_info->RegularExportCount(); i < n; ++i) {
- int cell_index = module_info->RegularExportCellIndex(i);
- Handle<FixedArray> export_names(module_info->RegularExportExportNames(i),
- isolate);
- CreateExport(isolate, module, cell_index, export_names);
- }
-
- // Partially set up indirect exports.
- // For each indirect export, we create the appropriate slot in the export
- // table and store its ModuleInfoEntry there. When we later find the correct
- // Cell in the module that actually provides the value, we replace the
- // ModuleInfoEntry by that Cell (see ResolveExport).
- Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
- for (int i = 0, n = special_exports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> entry(
- ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- Handle<Object> export_name(entry->export_name(), isolate);
- if (export_name->IsUndefined(isolate)) continue; // Star export.
- CreateIndirectExport(isolate, module, Handle<String>::cast(export_name),
- entry);
- }
-
- DCHECK_EQ(module->status(), kPreInstantiating);
- return true;
-}
-
-bool Module::RunInitializationCode(Isolate* isolate, Handle<Module> module) {
- DCHECK_EQ(module->status(), kInstantiating);
- Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
- DCHECK_EQ(MODULE_SCOPE, function->shared().scope_info().scope_type());
- Handle<Object> receiver = isolate->factory()->undefined_value();
- Handle<Object> argv[] = {module};
- MaybeHandle<Object> maybe_generator =
- Execution::Call(isolate, function, receiver, arraysize(argv), argv);
- Handle<Object> generator;
- if (!maybe_generator.ToHandle(&generator)) {
- DCHECK(isolate->has_pending_exception());
- return false;
- }
- DCHECK_EQ(*function, Handle<JSGeneratorObject>::cast(generator)->function());
- module->set_code(*generator);
- return true;
-}
-
-bool Module::MaybeTransitionComponent(Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack,
- Status new_status) {
- DCHECK(new_status == kInstantiated || new_status == kEvaluated);
- SLOW_DCHECK(
- // {module} is on the {stack}.
- std::count_if(stack->begin(), stack->end(),
- [&](Handle<Module> m) { return *m == *module; }) == 1);
- DCHECK_LE(module->dfs_ancestor_index(), module->dfs_index());
- if (module->dfs_ancestor_index() == module->dfs_index()) {
- // This is the root of its strongly connected component.
- Handle<Module> ancestor;
- do {
- ancestor = stack->front();
- stack->pop_front();
- DCHECK_EQ(ancestor->status(),
- new_status == kInstantiated ? kInstantiating : kEvaluating);
- if (new_status == kInstantiated) {
- if (!RunInitializationCode(isolate, ancestor)) return false;
- }
- ancestor->SetStatus(new_status);
- } while (*ancestor != *module);
+ if (module->IsSourceTextModule()) {
+ return SourceTextModule::PrepareInstantiate(
+ isolate, Handle<SourceTextModule>::cast(module), context, callback);
+ } else {
+ return SyntheticModule::PrepareInstantiate(
+ isolate, Handle<SyntheticModule>::cast(module), context, callback);
}
- return true;
}
bool Module::FinishInstantiate(Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack,
+ ZoneForwardList<Handle<SourceTextModule>>* stack,
unsigned* dfs_index, Zone* zone) {
DCHECK_NE(module->status(), kEvaluating);
if (module->status() >= kInstantiating) return true;
DCHECK_EQ(module->status(), kPreInstantiating);
STACK_CHECK(isolate, false);
- // Instantiate SharedFunctionInfo and mark module as instantiating for
- // the recursion.
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(module->code()),
- isolate);
- Handle<JSFunction> function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, isolate->native_context());
- module->set_code(*function);
- module->SetStatus(kInstantiating);
- module->set_dfs_index(*dfs_index);
- module->set_dfs_ancestor_index(*dfs_index);
- stack->push_front(module);
- (*dfs_index)++;
-
- // Recurse.
- Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
- for (int i = 0, length = requested_modules->length(); i < length; ++i) {
- Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
- isolate);
- if (!FinishInstantiate(isolate, requested_module, stack, dfs_index, zone)) {
- return false;
- }
-
- DCHECK_NE(requested_module->status(), kEvaluating);
- DCHECK_GE(requested_module->status(), kInstantiating);
- SLOW_DCHECK(
- // {requested_module} is instantiating iff it's on the {stack}.
- (requested_module->status() == kInstantiating) ==
- std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
- return *m == *requested_module;
- }));
-
- if (requested_module->status() == kInstantiating) {
- module->set_dfs_ancestor_index(
- std::min(module->dfs_ancestor_index(),
- requested_module->dfs_ancestor_index()));
- }
+ if (module->IsSourceTextModule()) {
+ return SourceTextModule::FinishInstantiate(
+ isolate, Handle<SourceTextModule>::cast(module), stack, dfs_index,
+ zone);
+ } else {
+ return SyntheticModule::FinishInstantiate(
+ isolate, Handle<SyntheticModule>::cast(module));
}
-
- Handle<Script> script(module->script(), isolate);
- Handle<ModuleInfo> module_info(module->info(), isolate);
-
- // Resolve imports.
- Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
- for (int i = 0, n = regular_imports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> entry(
- ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
- Handle<String> name(String::cast(entry->import_name()), isolate);
- MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
- ResolveSet resolve_set(zone);
- Handle<Cell> cell;
- if (!ResolveImport(isolate, module, name, entry->module_request(), loc,
- true, &resolve_set)
- .ToHandle(&cell)) {
- return false;
- }
- module->regular_imports().set(ImportIndex(entry->cell_index()), *cell);
- }
-
- // Resolve indirect exports.
- Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
- for (int i = 0, n = special_exports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> entry(
- ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- Handle<Object> name(entry->export_name(), isolate);
- if (name->IsUndefined(isolate)) continue; // Star export.
- MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
- ResolveSet resolve_set(zone);
- if (ResolveExport(isolate, module, Handle<String>(),
- Handle<String>::cast(name), loc, true, &resolve_set)
- .is_null()) {
- return false;
- }
- }
-
- return MaybeTransitionComponent(isolate, module, stack, kInstantiated);
}
MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
@@ -663,7 +234,14 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Evaluating module ";
- module->script().GetNameOrSourceURL().Print(os);
+ if (module->IsSourceTextModule()) {
+ Handle<SourceTextModule>::cast(module)
+ ->script()
+ .GetNameOrSourceURL()
+ .Print(os);
+ } else {
+ Handle<SyntheticModule>::cast(module)->name().Print(os);
+ }
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -677,7 +255,7 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
DCHECK_GE(module->status(), kInstantiated);
Zone zone(isolate->allocator(), ZONE_NAME);
- ZoneForwardList<Handle<Module>> stack(&zone);
+ ZoneForwardList<Handle<SourceTextModule>> stack(&zone);
unsigned dfs_index = 0;
Handle<Object> result;
if (!Evaluate(isolate, module, &stack, &dfs_index).ToHandle(&result)) {
@@ -693,9 +271,9 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
return result;
}
-MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack,
- unsigned* dfs_index) {
+MaybeHandle<Object> Module::Evaluate(
+ Isolate* isolate, Handle<Module> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index) {
if (module->status() == kErrored) {
isolate->Throw(module->GetException());
return MaybeHandle<Object>();
@@ -706,134 +284,13 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
DCHECK_EQ(module->status(), kInstantiated);
STACK_CHECK(isolate, MaybeHandle<Object>());
- Handle<JSGeneratorObject> generator(JSGeneratorObject::cast(module->code()),
- isolate);
- module->set_code(
- generator->function().shared().scope_info().ModuleDescriptorInfo());
- module->SetStatus(kEvaluating);
- module->set_dfs_index(*dfs_index);
- module->set_dfs_ancestor_index(*dfs_index);
- stack->push_front(module);
- (*dfs_index)++;
-
- // Recursion.
- Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
- for (int i = 0, length = requested_modules->length(); i < length; ++i) {
- Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
- isolate);
- RETURN_ON_EXCEPTION(
- isolate, Evaluate(isolate, requested_module, stack, dfs_index), Object);
-
- DCHECK_GE(requested_module->status(), kEvaluating);
- DCHECK_NE(requested_module->status(), kErrored);
- SLOW_DCHECK(
- // {requested_module} is evaluating iff it's on the {stack}.
- (requested_module->status() == kEvaluating) ==
- std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
- return *m == *requested_module;
- }));
-
- if (requested_module->status() == kEvaluating) {
- module->set_dfs_ancestor_index(
- std::min(module->dfs_ancestor_index(),
- requested_module->dfs_ancestor_index()));
- }
+ if (module->IsSourceTextModule()) {
+ return SourceTextModule::Evaluate(
+ isolate, Handle<SourceTextModule>::cast(module), stack, dfs_index);
+ } else {
+ return SyntheticModule::Evaluate(isolate,
+ Handle<SyntheticModule>::cast(module));
}
-
- // Evaluation of module body.
- Handle<JSFunction> resume(
- isolate->native_context()->generator_next_internal(), isolate);
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr),
- Object);
- DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate));
-
- CHECK(MaybeTransitionComponent(isolate, module, stack, kEvaluated));
- return handle(JSIteratorResult::cast(*result).value(), isolate);
-}
-
-namespace {
-
-void FetchStarExports(Isolate* isolate, Handle<Module> module, Zone* zone,
- UnorderedModuleSet* visited) {
- DCHECK_GE(module->status(), Module::kInstantiating);
-
- if (module->module_namespace().IsJSModuleNamespace()) return; // Shortcut.
-
- bool cycle = !visited->insert(module).second;
- if (cycle) return;
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- UnorderedStringMap more_exports(zone);
-
- // TODO(neis): Only allocate more_exports if there are star exports.
- // Maybe split special_exports into indirect_exports and star_exports.
-
- ReadOnlyRoots roots(isolate);
- Handle<FixedArray> special_exports(module->info().special_exports(), isolate);
- for (int i = 0, n = special_exports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> entry(
- ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- if (!entry->export_name().IsUndefined(roots)) {
- continue; // Indirect export.
- }
-
- Handle<Module> requested_module(
- Module::cast(module->requested_modules().get(entry->module_request())),
- isolate);
-
- // Recurse.
- FetchStarExports(isolate, requested_module, zone, visited);
-
- // Collect all of [requested_module]'s exports that must be added to
- // [module]'s exports (i.e. to [exports]). We record these in
- // [more_exports]. Ambiguities (conflicting exports) are marked by mapping
- // the name to undefined instead of a Cell.
- Handle<ObjectHashTable> requested_exports(requested_module->exports(),
- isolate);
- for (int i = 0, n = requested_exports->Capacity(); i < n; ++i) {
- Object key;
- if (!requested_exports->ToKey(roots, i, &key)) continue;
- Handle<String> name(String::cast(key), isolate);
-
- if (name->Equals(roots.default_string())) continue;
- if (!exports->Lookup(name).IsTheHole(roots)) continue;
-
- Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(i)), isolate);
- auto insert_result = more_exports.insert(std::make_pair(name, cell));
- if (!insert_result.second) {
- auto it = insert_result.first;
- if (*it->second == *cell || it->second->IsUndefined(roots)) {
- // We already recorded this mapping before, or the name is already
- // known to be ambiguous. In either case, there's nothing to do.
- } else {
- DCHECK(it->second->IsCell());
- // Different star exports provide different cells for this name, hence
- // mark the name as ambiguous.
- it->second = roots.undefined_value_handle();
- }
- }
- }
- }
-
- // Copy [more_exports] into [exports].
- for (const auto& elem : more_exports) {
- if (elem.second->IsUndefined(isolate)) continue; // Ambiguous export.
- DCHECK(!elem.first->Equals(ReadOnlyRoots(isolate).default_string()));
- DCHECK(elem.second->IsCell());
- exports = ObjectHashTable::Put(exports, elem.first, elem.second);
- }
- module->set_exports(*exports);
-}
-
-} // anonymous namespace
-
-Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
- Handle<Module> module,
- int module_request) {
- Handle<Module> requested_module(
- Module::cast(module->requested_modules().get(module_request)), isolate);
- return Module::GetModuleNamespace(isolate, requested_module);
}
Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
@@ -848,7 +305,12 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
// Collect the export names.
Zone zone(isolate->allocator(), ZONE_NAME);
UnorderedModuleSet visited(&zone);
- FetchStarExports(isolate, module, &zone, &visited);
+
+ if (module->IsSourceTextModule()) {
+ SourceTextModule::FetchStarExports(
+ isolate, Handle<SourceTextModule>::cast(module), &zone, &visited);
+ }
+
Handle<ObjectHashTable> exports(module->exports(), isolate);
ZoneVector<Handle<String>> names(&zone);
names.reserve(exports->NumberOfElements());
@@ -874,7 +336,7 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
// Create the properties in the namespace object. Transition the object
// to dictionary mode so that property addition is faster.
PropertyAttributes attr = DONT_DELETE;
- JSObject::NormalizeProperties(ns, CLEAR_INOBJECT_PROPERTIES,
+ JSObject::NormalizeProperties(isolate, ns, CLEAR_INOBJECT_PROPERTIES,
static_cast<int>(names.size()),
"JSModuleNamespace");
for (const auto& name : names) {
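The largest piece removed from module.cc above, FetchStarExports, carries the star-export conflict rule that now lives on SourceTextModule: the first module to supply a name wins, a second provider with a different cell marks the name ambiguous, and ambiguous names are dropped before more_exports is merged into the exports table, so they never appear on the namespace object. A standalone sketch of that rule using standard containers (std::optional standing in for the undefined_value marker; this is an illustration, not the V8 code):

// Minimal sketch of the FetchStarExports conflict handling, reduced to
// standard containers. nullopt plays the role of undefined_value: the name is
// known but ambiguous, so it must not be copied into the exports table.
#include <map>
#include <optional>
#include <string>

using Cell = int;  // stand-in for v8::internal::Cell

void AddStarExport(std::map<std::string, std::optional<Cell>>& more_exports,
                   const std::string& name, Cell cell) {
  auto insert_result = more_exports.insert({name, cell});
  if (!insert_result.second) {
    std::optional<Cell>& existing = insert_result.first->second;
    if (existing.has_value() && *existing != cell) {
      // Different star exports provide different cells for this name, hence
      // mark the name as ambiguous.
      existing.reset();
    }
  }
}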
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index a1672dce7e..b776ddb0be 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -9,6 +9,7 @@
#include "src/objects/js-objects.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -20,35 +21,23 @@ template <typename T>
class Handle;
class Isolate;
class JSModuleNamespace;
-class ModuleDescriptor;
-class ModuleInfo;
-class ModuleInfoEntry;
+class SourceTextModuleDescriptor;
+class SourceTextModuleInfo;
+class SourceTextModuleInfoEntry;
class String;
class Zone;
-// The runtime representation of an ECMAScript module.
-class Module : public Struct {
+// Module is the base class for ECMAScript module types, roughly corresponding
+// to Abstract Module Record.
+// https://tc39.github.io/ecma262/#sec-abstract-module-records
+class Module : public HeapObject {
public:
NEVER_READ_ONLY_SPACE
DECL_CAST(Module)
DECL_VERIFIER(Module)
DECL_PRINTER(Module)
- // The code representing this module, or an abstraction thereof.
- // This is either a SharedFunctionInfo, a JSFunction, a JSGeneratorObject, or
- // a ModuleInfo, depending on the state (status) the module is in. See
- // Module::ModuleVerify() for the precise invariant.
- DECL_ACCESSORS(code, Object)
-
- // Arrays of cells corresponding to regular exports and regular imports.
- // A cell's position in the array is determined by the cell index of the
- // associated module entry (which coincides with the variable index of the
- // associated variable).
- DECL_ACCESSORS(regular_exports, FixedArray)
- DECL_ACCESSORS(regular_imports, FixedArray)
-
// The complete export table, mapping an export name to its cell.
- // TODO(neis): We may want to remove the regular exports from the table.
DECL_ACCESSORS(exports, ObjectHashTable)
// Hash for this object (a random non-zero Smi).
@@ -67,31 +56,12 @@ class Module : public Struct {
kErrored
};
- // The exception in the case {status} is kErrored.
- Object GetException();
-
- // The shared function info in case {status} is not kEvaluating, kEvaluated or
- // kErrored.
- SharedFunctionInfo GetSharedFunctionInfo() const;
-
// The namespace object (or undefined).
DECL_ACCESSORS(module_namespace, HeapObject)
- // Modules imported or re-exported by this module.
- // Corresponds 1-to-1 to the module specifier strings in
- // ModuleInfo::module_requests.
- DECL_ACCESSORS(requested_modules, FixedArray)
-
- // [script]: Script from which the module originates.
- DECL_ACCESSORS(script, Script)
-
- // The value of import.meta inside of this module.
- // Lazily initialized on first access. It's the hole before first access and
- // a JSObject afterwards.
- DECL_ACCESSORS(import_meta, Object)
-
- // Get the ModuleInfo associated with the code.
- inline ModuleInfo info() const;
+ // The exception in the case {status} is kErrored.
+ Object GetException();
+ DECL_ACCESSORS(exception, Object)
// Implementation of spec operation ModuleDeclarationInstantiation.
// Returns false if an exception occurred during instantiation, true
@@ -105,63 +75,20 @@ class Module : public Struct {
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
Isolate* isolate, Handle<Module> module);
- Cell GetCell(int cell_index);
- static Handle<Object> LoadVariable(Isolate* isolate, Handle<Module> module,
- int cell_index);
- static void StoreVariable(Handle<Module> module, int cell_index,
- Handle<Object> value);
-
- static int ImportIndex(int cell_index);
- static int ExportIndex(int cell_index);
-
- // Get the namespace object for [module_request] of [module]. If it doesn't
- // exist yet, it is created.
- static Handle<JSModuleNamespace> GetModuleNamespace(Isolate* isolate,
- Handle<Module> module,
- int module_request);
-
// Get the namespace object for [module]. If it doesn't exist yet, it is
// created.
static Handle<JSModuleNamespace> GetModuleNamespace(Isolate* isolate,
Handle<Module> module);
// Layout description.
-#define MODULE_FIELDS(V) \
- V(kCodeOffset, kTaggedSize) \
- V(kExportsOffset, kTaggedSize) \
- V(kRegularExportsOffset, kTaggedSize) \
- V(kRegularImportsOffset, kTaggedSize) \
- V(kHashOffset, kTaggedSize) \
- V(kModuleNamespaceOffset, kTaggedSize) \
- V(kRequestedModulesOffset, kTaggedSize) \
- V(kStatusOffset, kTaggedSize) \
- V(kDfsIndexOffset, kTaggedSize) \
- V(kDfsAncestorIndexOffset, kTaggedSize) \
- V(kExceptionOffset, kTaggedSize) \
- V(kScriptOffset, kTaggedSize) \
- V(kImportMetaOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, MODULE_FIELDS)
-#undef MODULE_FIELDS
-
- private:
- friend class Factory;
-
- DECL_ACCESSORS(exception, Object)
-
- // TODO(neis): Don't store those in the module object?
- DECL_INT_ACCESSORS(dfs_index)
- DECL_INT_ACCESSORS(dfs_ancestor_index)
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ TORQUE_GENERATED_MODULE_FIELDS)
- // Helpers for Instantiate and Evaluate.
+ using BodyDescriptor =
+ FixedBodyDescriptor<kExportsOffset, kHeaderSize, kHeaderSize>;
- static void CreateExport(Isolate* isolate, Handle<Module> module,
- int cell_index, Handle<FixedArray> names);
- static void CreateIndirectExport(Isolate* isolate, Handle<Module> module,
- Handle<String> name,
- Handle<ModuleInfoEntry> entry);
+ protected:
+ friend class Factory;
// The [must_resolve] argument indicates whether or not an exception should be
// thrown in case the module does not provide an export named [name]
@@ -176,32 +103,18 @@ class Module : public Struct {
Isolate* isolate, Handle<Module> module, Handle<String> module_specifier,
Handle<String> export_name, MessageLocation loc, bool must_resolve,
ResolveSet* resolve_set);
- static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveImport(
- Isolate* isolate, Handle<Module> module, Handle<String> name,
- int module_request, MessageLocation loc, bool must_resolve,
- ResolveSet* resolve_set);
-
- static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
- Isolate* isolate, Handle<Module> module, Handle<String> module_specifier,
- Handle<String> export_name, MessageLocation loc, bool must_resolve,
- ResolveSet* resolve_set);
static V8_WARN_UNUSED_RESULT bool PrepareInstantiate(
Isolate* isolate, Handle<Module> module, v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback);
static V8_WARN_UNUSED_RESULT bool FinishInstantiate(
Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack, unsigned* dfs_index, Zone* zone);
- static V8_WARN_UNUSED_RESULT bool RunInitializationCode(
- Isolate* isolate, Handle<Module> module);
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index,
+ Zone* zone);
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack, unsigned* dfs_index);
-
- static V8_WARN_UNUSED_RESULT bool MaybeTransitionComponent(
- Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack, Status new_status);
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index);
// Set module's status back to kUninstantiated and reset other internal state.
// This is used when instantiation fails.
@@ -217,7 +130,7 @@ class Module : public Struct {
void PrintStatusTransition(Status new_status);
#endif // DEBUG
- OBJECT_CONSTRUCTORS(Module, Struct);
+ OBJECT_CONSTRUCTORS(Module, HeapObject);
};
// When importing a module namespace (import * as foo from "bar"), a
@@ -250,93 +163,16 @@ class JSModuleNamespace : public JSObject {
kInObjectFieldCount,
};
-// Layout description.
-#define JS_MODULE_NAMESPACE_FIELDS(V) \
- V(kModuleOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0) \
- V(kInObjectFieldsOffset, kTaggedSize* kInObjectFieldCount) \
- /* Total size. */ \
- V(kSize, 0)
-
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_MODULE_NAMESPACE_FIELDS)
-#undef JS_MODULE_NAMESPACE_FIELDS
-
- OBJECT_CONSTRUCTORS(JSModuleNamespace, JSObject);
-};
-
-// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
-class ModuleInfo : public FixedArray {
- public:
- DECL_CAST(ModuleInfo)
-
- static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
- ModuleDescriptor* descr);
-
- inline FixedArray module_requests() const;
- inline FixedArray special_exports() const;
- inline FixedArray regular_exports() const;
- inline FixedArray regular_imports() const;
- inline FixedArray namespace_imports() const;
- inline FixedArray module_request_positions() const;
-
- // Accessors for [regular_exports].
- int RegularExportCount() const;
- String RegularExportLocalName(int i) const;
- int RegularExportCellIndex(int i) const;
- FixedArray RegularExportExportNames(int i) const;
+ TORQUE_GENERATED_JSMODULE_NAMESPACE_FIELDS)
-#ifdef DEBUG
- inline bool Equals(ModuleInfo other) const;
-#endif
+ // We need to include in-object fields
+ // TODO(v8:8944): improve handling of in-object fields
+ static constexpr int kSize =
+ kHeaderSize + (kTaggedSize * kInObjectFieldCount);
- private:
- friend class Factory;
- friend class ModuleDescriptor;
- enum {
- kModuleRequestsIndex,
- kSpecialExportsIndex,
- kRegularExportsIndex,
- kNamespaceImportsIndex,
- kRegularImportsIndex,
- kModuleRequestPositionsIndex,
- kLength
- };
- enum {
- kRegularExportLocalNameOffset,
- kRegularExportCellIndexOffset,
- kRegularExportExportNamesOffset,
- kRegularExportLength
- };
- OBJECT_CONSTRUCTORS(ModuleInfo, FixedArray);
-};
-
-class ModuleInfoEntry : public Struct {
- public:
- DECL_CAST(ModuleInfoEntry)
- DECL_PRINTER(ModuleInfoEntry)
- DECL_VERIFIER(ModuleInfoEntry)
-
- DECL_ACCESSORS(export_name, Object)
- DECL_ACCESSORS(local_name, Object)
- DECL_ACCESSORS(import_name, Object)
- DECL_INT_ACCESSORS(module_request)
- DECL_INT_ACCESSORS(cell_index)
- DECL_INT_ACCESSORS(beg_pos)
- DECL_INT_ACCESSORS(end_pos)
-
- static Handle<ModuleInfoEntry> New(Isolate* isolate,
- Handle<Object> export_name,
- Handle<Object> local_name,
- Handle<Object> import_name,
- int module_request, int cell_index,
- int beg_pos, int end_pos);
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- TORQUE_GENERATED_MODULE_INFO_ENTRY_FIELDS)
-
- OBJECT_CONSTRUCTORS(ModuleInfoEntry, Struct);
+ OBJECT_CONSTRUCTORS(JSModuleNamespace, JSObject);
};
} // namespace internal
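With the hand-written MODULE_FIELDS list gone, the base Module layout is now generated by Torque. Judging only from the accessors kept on the base class (exports, hash, module_namespace, status, exception) and from the BodyDescriptor starting at kExportsOffset, the generated constants plausibly cover the following fields; this is an inference about the generated header, not a quote from it:

// Hypothetical shape of TORQUE_GENERATED_MODULE_FIELDS, inferred from the
// fields that remain on the Module base class in this CL. The real macro is
// emitted by Torque into torque-generated/field-offsets-tq.h.
#define TORQUE_GENERATED_MODULE_FIELDS(V) \
  V(kExportsOffset, kTaggedSize)          \
  V(kHashOffset, kTaggedSize)             \
  V(kModuleNamespaceOffset, kTaggedSize)  \
  V(kStatusOffset, kTaggedSize)           \
  V(kExceptionOffset, kTaggedSize)        \
  /* Header size. */                      \
  V(kHeaderSize, 0)                       \
  /* Total size. */                       \
  V(kSize, 0)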
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index b3e04bbd50..8aded12fb5 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -16,14 +16,9 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(Name, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(Symbol, Name)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Name)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Symbol)
-CAST_ACCESSOR(Name)
-CAST_ACCESSOR(Symbol)
-
-ACCESSORS(Symbol, name, Object, kNameOffset)
-INT_ACCESSORS(Symbol, flags, kFlagsOffset)
BIT_FIELD_ACCESSORS(Symbol, flags, is_private, Symbol::IsPrivateBit)
BIT_FIELD_ACCESSORS(Symbol, flags, is_well_known_symbol,
Symbol::IsWellKnownSymbolBit)
@@ -44,20 +39,14 @@ void Symbol::set_is_private_name() {
set_flags(Symbol::IsPrivateNameBit::update(flags(), true));
}
-bool Name::IsUniqueName() const {
- uint32_t type = map().instance_type();
+DEF_GETTER(Name, IsUniqueName, bool) {
+ uint32_t type = map(isolate).instance_type();
bool result = (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
(kStringTag | kNotInternalizedTag);
SLOW_DCHECK(result == HeapObject::IsUniqueName());
return result;
}
-uint32_t Name::hash_field() { return ReadField<uint32_t>(kHashFieldOffset); }
-
-void Name::set_hash_field(uint32_t value) {
- WriteField<uint32_t>(kHashFieldOffset, value);
-}
-
bool Name::Equals(Name other) {
if (other == *this) return true;
if ((this->IsInternalizedString() && other.IsInternalizedString()) ||
@@ -91,17 +80,17 @@ uint32_t Name::Hash() {
return String::cast(*this).ComputeAndSetHash();
}
-bool Name::IsInterestingSymbol() const {
- return IsSymbol() && Symbol::cast(*this).is_interesting_symbol();
+DEF_GETTER(Name, IsInterestingSymbol, bool) {
+ return IsSymbol(isolate) && Symbol::cast(*this).is_interesting_symbol();
}
-bool Name::IsPrivate() {
- return this->IsSymbol() && Symbol::cast(*this).is_private();
+DEF_GETTER(Name, IsPrivate, bool) {
+ return this->IsSymbol(isolate) && Symbol::cast(*this).is_private();
}
-bool Name::IsPrivateName() {
+DEF_GETTER(Name, IsPrivateName, bool) {
bool is_private_name =
- this->IsSymbol() && Symbol::cast(*this).is_private_name();
+ this->IsSymbol(isolate) && Symbol::cast(*this).is_private_name();
DCHECK_IMPLIES(is_private_name, IsPrivate());
return is_private_name;
}
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index 8b2a8f0a01..b13aa30fb0 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -7,7 +7,7 @@
#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
-#include "torque-generated/field-offsets-tq.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -17,12 +17,8 @@ namespace internal {
// The Name abstract class captures anything that can be used as a property
// name, i.e., strings and symbols. All names store a hash value.
-class Name : public HeapObject {
+class Name : public TorqueGeneratedName<Name, HeapObject> {
public:
- // Get and set the hash field of the name.
- inline uint32_t hash_field();
- inline void set_hash_field(uint32_t value);
-
// Tells whether the hash code has been computed.
inline bool HasHashCode();
@@ -43,15 +39,19 @@ class Name : public HeapObject {
// symbol properties are added, so we can optimize lookups on objects
// that don't have the flag.
inline bool IsInterestingSymbol() const;
+ inline bool IsInterestingSymbol(Isolate* isolate) const;
// If the name is private, it can only name own properties.
- inline bool IsPrivate();
+ inline bool IsPrivate() const;
+ inline bool IsPrivate(Isolate* isolate) const;
// If the name is a private name, it should behave like a private
// symbol but also throw on property access miss.
- inline bool IsPrivateName();
+ inline bool IsPrivateName() const;
+ inline bool IsPrivateName(Isolate* isolate) const;
inline bool IsUniqueName() const;
+ inline bool IsUniqueName(Isolate* isolate) const;
static inline bool ContainsCachedArrayIndex(uint32_t hash);
@@ -62,15 +62,10 @@ class Name : public HeapObject {
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToFunctionName(
Isolate* isolate, Handle<Name> name, Handle<String> prefix);
- DECL_CAST(Name)
-
DECL_PRINTER(Name)
void NameShortPrint();
int NameShortPrint(Vector<char> str);
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_NAME_FIELDS)
-
// Mask constant for checking if a name has a computed hash code
// and if it is a string that is an array index. The least significant bit
// indicates whether a hash code has been computed. If the hash code has
@@ -131,17 +126,12 @@ class Name : public HeapObject {
protected:
static inline bool IsHashFieldComputed(uint32_t field);
- OBJECT_CONSTRUCTORS(Name, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(Name)
};
// ES6 symbols.
-class Symbol : public Name {
+class Symbol : public TorqueGeneratedSymbol<Symbol, Name> {
public:
- // [name]: The print name of a symbol, or undefined if none.
- DECL_ACCESSORS(name, Object)
-
- DECL_INT_ACCESSORS(flags)
-
// [is_private]: Whether this is a private symbol. Private symbols can only
// be used to designate own properties of objects.
DECL_BOOLEAN_ACCESSORS(is_private)
@@ -169,15 +159,10 @@ class Symbol : public Name {
inline bool is_private_name() const;
inline void set_is_private_name();
- DECL_CAST(Symbol)
-
// Dispatched behavior.
DECL_PRINTER(Symbol)
DECL_VERIFIER(Symbol)
- DEFINE_FIELD_OFFSET_CONSTANTS(Name::kHeaderSize,
- TORQUE_GENERATED_SYMBOL_FIELDS)
-
// Flags layout.
#define FLAGS_BIT_FIELDS(V, _) \
V(IsPrivateBit, bool, 1, _) \
@@ -199,7 +184,7 @@ class Symbol : public Name {
// TODO(cbruni): remove once the new maptracer is in place.
friend class Name; // For PrivateSymbolToName.
- OBJECT_CONSTRUCTORS(Symbol, Name);
+ TQ_OBJECT_CONSTRUCTORS(Symbol)
};
} // namespace internal
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 78452de502..c15b212eec 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -27,6 +27,7 @@ class FunctionLiteral;
class FunctionTemplateInfo;
class JSAsyncGeneratorObject;
class JSGlobalProxy;
+class SourceTextModule;
class JSPromise;
class JSProxy;
class JSProxyRevocableResult;
@@ -35,7 +36,7 @@ class LayoutDescriptor;
class LookupIterator;
class FieldType;
class Module;
-class ModuleInfoEntry;
+class SourceTextModuleInfoEntry;
class MutableHeapNumber;
class ObjectHashTable;
class ObjectTemplateInfo;
@@ -53,6 +54,7 @@ class ScriptContextTable;
class SharedFunctionInfo;
class StringStream;
class Symbol;
+class SyntheticModule;
class FeedbackCell;
class FeedbackMetadata;
class FeedbackVector;
@@ -134,10 +136,13 @@ class ZoneForwardList;
V(JSAsyncGeneratorObject) \
V(JSBoundFunction) \
V(JSCollection) \
+ V(JSCollectionIterator) \
V(JSContextExtensionObject) \
V(JSDataView) \
V(JSDate) \
V(JSError) \
+ V(JSFinalizationGroup) \
+ V(JSFinalizationGroupCleanupIterator) \
V(JSFunction) \
V(JSGeneratorObject) \
V(JSGlobalObject) \
@@ -147,6 +152,7 @@ class ZoneForwardList;
V(JSMessageObject) \
V(JSModuleNamespace) \
V(JSObject) \
+ V(JSPrimitiveWrapper) \
V(JSPromise) \
V(JSProxy) \
V(JSReceiver) \
@@ -158,18 +164,15 @@ class ZoneForwardList;
V(JSSloppyArgumentsObject) \
V(JSStringIterator) \
V(JSTypedArray) \
- V(JSValue) \
- V(JSWeakRef) \
V(JSWeakCollection) \
- V(JSFinalizationGroup) \
- V(JSFinalizationGroupCleanupIterator) \
+ V(JSWeakRef) \
V(JSWeakMap) \
V(JSWeakSet) \
V(LoadHandler) \
V(Map) \
V(MapCache) \
+ V(Module) \
V(Microtask) \
- V(ModuleInfo) \
V(MutableHeapNumber) \
V(Name) \
V(NameDictionary) \
@@ -202,6 +205,8 @@ class ZoneForwardList;
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
+ V(SourceTextModuleInfo) \
V(StoreHandler) \
V(String) \
V(StringSet) \
@@ -210,6 +215,7 @@ class ZoneForwardList;
V(Struct) \
V(Symbol) \
V(SymbolWrapper) \
+ V(SyntheticModule) \
V(TemplateInfo) \
V(TemplateList) \
V(ThinString) \
@@ -248,9 +254,17 @@ class ZoneForwardList;
#define HEAP_OBJECT_TEMPLATE_TYPE_LIST(V) V(HashTable)
+// Logical sub-types of heap objects that don't correspond to a C++ class but
+// represent some specialization in terms of additional constraints.
+#define HEAP_OBJECT_SPECIALIZED_TYPE_LIST(V) \
+ V(CallableApiObject) \
+ V(CallableJSProxy) \
+ V(NonNullForeign)
+
#define HEAP_OBJECT_TYPE_LIST(V) \
HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
- HEAP_OBJECT_TEMPLATE_TYPE_LIST(V)
+ HEAP_OBJECT_TEMPLATE_TYPE_LIST(V) \
+ HEAP_OBJECT_SPECIALIZED_TYPE_LIST(V)
#define ODDBALL_LIST(V) \
V(Undefined, undefined_value) \
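Adding SourceTextModule, SyntheticModule, JSPrimitiveWrapper and the other new rows to these lists is what brings the matching predicates into existence: objects-inl.h (further down in this diff) instantiates IS_TYPE_FUNCTION_DEF once per HEAP_OBJECT_TYPE_LIST entry, and each entry now yields both an isolate-less and an isolate-taking overload. For the new SourceTextModule row the mechanical expansion is:

// Expansion of IS_TYPE_FUNCTION_DEF(SourceTextModule), as produced by
// HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF) in objects-inl.h below.
bool Object::IsSourceTextModule() const {
  return IsHeapObject() && HeapObject::cast(*this).IsSourceTextModule();
}
bool Object::IsSourceTextModule(Isolate* isolate) const {
  return IsHeapObject() && HeapObject::cast(*this).IsSourceTextModule(isolate);
}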
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index c8ebf57ce7..b96c03c00f 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -11,12 +11,16 @@
#undef NEVER_READ_ONLY_SPACE
#undef NEVER_READ_ONLY_SPACE_IMPL
#undef DECL_PRIMITIVE_ACCESSORS
+#undef DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_INT_ACCESSORS
+#undef DECL_SYNCHRONIZED_INT_ACCESSORS
#undef DECL_INT32_ACCESSORS
#undef DECL_UINT16_ACCESSORS
#undef DECL_INT16_ACCESSORS
#undef DECL_UINT8_ACCESSORS
+#undef DECL_GETTER
+#undef DEF_GETTER
#undef DECL_ACCESSORS
#undef DECL_CAST
#undef CAST_ACCESSOR
@@ -45,13 +49,10 @@
#undef TYPE_CHECKER
#undef RELAXED_INT16_ACCESSORS
#undef FIELD_ADDR
-#undef READ_FIELD
-#undef READ_WEAK_FIELD
#undef ACQUIRE_READ_FIELD
#undef RELAXED_READ_FIELD
#undef RELAXED_READ_WEAK_FIELD
#undef WRITE_FIELD
-#undef WRITE_WEAK_FIELD
#undef RELEASE_WRITE_FIELD
#undef RELAXED_WRITE_FIELD
#undef RELAXED_WRITE_WEAK_FIELD
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 1f499d4fba..8f9e51ca9e 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -14,15 +14,18 @@
// for fields that can be written to and read from multiple threads at the same
// time. See comments in src/base/atomicops.h for the memory ordering sematics.
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
// Since this changes visibility, it should always be last in a class
// definition.
-#define OBJECT_CONSTRUCTORS(Type, ...) \
- public: \
- constexpr Type() : __VA_ARGS__() {} \
- \
- protected: \
+#define OBJECT_CONSTRUCTORS(Type, ...) \
+ public: \
+ constexpr Type() : __VA_ARGS__() {} \
+ \
+ protected: \
+ template <typename TFieldType, int kFieldOffset> \
+ friend class TaggedField; \
+ \
explicit inline Type(Address ptr)
#define OBJECT_CONSTRUCTORS_IMPL(Type, Super) \
@@ -34,22 +37,27 @@
// TODO(leszeks): Add checks in the factory that we never allocate these
// objects in RO space.
-#define NEVER_READ_ONLY_SPACE_IMPL(Type) \
- Heap* Type::GetHeap() const { \
- return NeverReadOnlySpaceObject::GetHeap(*this); \
- } \
- Isolate* Type::GetIsolate() const { \
- return NeverReadOnlySpaceObject::GetIsolate(*this); \
+#define NEVER_READ_ONLY_SPACE_IMPL(Type) \
+ Heap* Type::GetHeap() const { return GetHeapFromWritableObject(*this); } \
+ Isolate* Type::GetIsolate() const { \
+ return GetIsolateFromWritableObject(*this); \
}
#define DECL_PRIMITIVE_ACCESSORS(name, type) \
inline type name() const; \
inline void set_##name(type value);
+#define DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS(name, type) \
+ inline type synchronized_##name() const; \
+ inline void synchronized_set_##name(type value);
+
#define DECL_BOOLEAN_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, bool)
#define DECL_INT_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int)
+#define DECL_SYNCHRONIZED_INT_ACCESSORS(name) \
+ DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS(name, int)
+
#define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t)
#define DECL_UINT16_ACCESSORS(name) \
@@ -64,8 +72,22 @@
inline uint8_t name() const; \
inline void set_##name(int value);
+// TODO(ishell): eventually isolate-less getters should not be used anymore.
+// For full pointer-mode the C++ compiler should optimize away unused isolate
+// parameter.
+#define DECL_GETTER(name, type) \
+ inline type name() const; \
+ inline type name(Isolate* isolate) const;
+
+#define DEF_GETTER(holder, name, type) \
+ type holder::name() const { \
+ Isolate* isolate = GetIsolateForPtrCompr(*this); \
+ return holder::name(isolate); \
+ } \
+ type holder::name(Isolate* isolate) const
+
#define DECL_ACCESSORS(name, type) \
- inline type name() const; \
+ DECL_GETTER(name, type) \
inline void set_##name(type value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -112,14 +134,14 @@
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
- type holder::name() const { \
- type value = type::cast(READ_FIELD(*this, offset)); \
+ DEF_GETTER(holder, name, type) { \
+ type value = TaggedField<type, offset>::load(isolate, *this); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(type value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
- WRITE_FIELD(*this, offset, value); \
+ TaggedField<type, offset>::store(*this, value); \
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
@@ -129,17 +151,17 @@
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
-#define SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, \
- get_condition, set_condition) \
- type holder::name() const { \
- type value = type::cast(ACQUIRE_READ_FIELD(*this, offset)); \
- DCHECK(get_condition); \
- return value; \
- } \
- void holder::set_##name(type value, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- RELEASE_WRITE_FIELD(*this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
+#define SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, \
+ get_condition, set_condition) \
+ DEF_GETTER(holder, name, type) { \
+ type value = TaggedField<type, offset>::Acquire_Load(isolate, *this); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(type value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ TaggedField<type, offset>::Release_Store(*this, value); \
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
#define SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, condition) \
@@ -151,14 +173,15 @@
#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
set_condition) \
- MaybeObject holder::name() const { \
- MaybeObject value = READ_WEAK_FIELD(*this, offset); \
+ DEF_GETTER(holder, name, MaybeObject) { \
+ MaybeObject value = \
+ TaggedField<MaybeObject, offset>::load(isolate, *this); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(MaybeObject value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
- WRITE_WEAK_FIELD(*this, offset, value); \
+ TaggedField<MaybeObject, offset>::store(*this, value); \
CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \
}
@@ -169,36 +192,44 @@
WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
// Getter that returns a Smi as an int and writes an int as a Smi.
-#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
- int holder::name() const { \
- DCHECK(condition); \
- Object value = READ_FIELD(*this, offset); \
- return Smi::ToInt(value); \
- } \
- void holder::set_##name(int value) { \
- DCHECK(condition); \
- WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
+#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
+ int holder::name() const { \
+ DCHECK(condition); \
+ Smi value = TaggedField<Smi, offset>::load(*this); \
+ return value.value(); \
+ } \
+ void holder::set_##name(int value) { \
+ DCHECK(condition); \
+ TaggedField<Smi, offset>::store(*this, Smi::FromInt(value)); \
}
#define SMI_ACCESSORS(holder, name, offset) \
SMI_ACCESSORS_CHECKED(holder, name, offset, true)
-#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
- int holder::synchronized_##name() const { \
- Object value = ACQUIRE_READ_FIELD(*this, offset); \
- return Smi::ToInt(value); \
- } \
- void holder::synchronized_set_##name(int value) { \
- RELEASE_WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
+#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
+ int holder::synchronized_##name() const { \
+ Smi value = TaggedField<Smi, offset>::Acquire_Load(*this); \
+ return value.value(); \
+ } \
+ void holder::synchronized_set_##name(int value) { \
+ TaggedField<Smi, offset>::Release_Store(*this, Smi::FromInt(value)); \
+ }
+
+#define RELAXED_SMI_ACCESSORS(holder, name, offset) \
+ int holder::relaxed_read_##name() const { \
+ Smi value = TaggedField<Smi, offset>::Relaxed_Load(*this); \
+ return value.value(); \
+ } \
+ void holder::relaxed_write_##name(int value) { \
+ TaggedField<Smi, offset>::Relaxed_Store(*this, Smi::FromInt(value)); \
}
-#define RELAXED_SMI_ACCESSORS(holder, name, offset) \
- int holder::relaxed_read_##name() const { \
- Object value = RELAXED_READ_FIELD(*this, offset); \
- return Smi::ToInt(value); \
- } \
- void holder::relaxed_write_##name(int value) { \
- RELAXED_WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
+#define TQ_SMI_ACCESSORS(holder, name) \
+ int holder::name() const { \
+ return TorqueGenerated##holder<holder, Super>::name().value(); \
+ } \
+ void holder::set_##name(int value) { \
+ TorqueGenerated##holder<holder, Super>::set_##name(Smi::FromInt(value)); \
}
#define BOOL_GETTER(holder, field, name, offset) \
@@ -223,9 +254,9 @@
return instance_type == forinstancetype; \
}
-#define TYPE_CHECKER(type, ...) \
- bool HeapObject::Is##type() const { \
- return InstanceTypeChecker::Is##type(map().instance_type()); \
+#define TYPE_CHECKER(type, ...) \
+ DEF_GETTER(HeapObject, Is##type, bool) { \
+ return InstanceTypeChecker::Is##type(map(isolate).instance_type()); \
}
#define RELAXED_INT16_ACCESSORS(holder, name, offset) \
@@ -238,39 +269,26 @@
#define FIELD_ADDR(p, offset) ((p).ptr() + offset - kHeapObjectTag)
-#define READ_FIELD(p, offset) (*ObjectSlot(FIELD_ADDR(p, offset)))
-
-#define READ_WEAK_FIELD(p, offset) (*MaybeObjectSlot(FIELD_ADDR(p, offset)))
-
#define ACQUIRE_READ_FIELD(p, offset) \
- ObjectSlot(FIELD_ADDR(p, offset)).Acquire_Load()
+ TaggedField<Object>::Acquire_Load(p, offset)
#define RELAXED_READ_FIELD(p, offset) \
- ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()
+ TaggedField<Object>::Relaxed_Load(p, offset)
#define RELAXED_READ_WEAK_FIELD(p, offset) \
- MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()
+ TaggedField<MaybeObject>::Relaxed_Load(p, offset)
-#ifdef V8_CONCURRENT_MARKING
-#define WRITE_FIELD(p, offset, value) \
- ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
-#define WRITE_WEAK_FIELD(p, offset, value) \
- MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
-#else
#define WRITE_FIELD(p, offset, value) \
- ObjectSlot(FIELD_ADDR(p, offset)).store(value)
-#define WRITE_WEAK_FIELD(p, offset, value) \
- MaybeObjectSlot(FIELD_ADDR(p, offset)).store(value)
-#endif
+ TaggedField<Object>::store(p, offset, value)
#define RELEASE_WRITE_FIELD(p, offset, value) \
- ObjectSlot(FIELD_ADDR(p, offset)).Release_Store(value)
+ TaggedField<Object>::Release_Store(p, offset, value)
#define RELAXED_WRITE_FIELD(p, offset, value) \
- ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
+ TaggedField<Object>::Relaxed_Store(p, offset, value)
#define RELAXED_WRITE_WEAK_FIELD(p, offset, value) \
- MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
+ TaggedField<MaybeObject>::Relaxed_Store(p, offset, value)
#define WRITE_BARRIER(object, offset, value) \
do { \
@@ -412,12 +430,15 @@
set(IndexForEntry(i) + k##name##Offset, value); \
}
-#define TQ_OBJECT_CONSTRUCTORS(Type) \
- public: \
- constexpr Type() = default; \
- \
- protected: \
- inline explicit Type(Address ptr); \
+#define TQ_OBJECT_CONSTRUCTORS(Type) \
+ public: \
+ constexpr Type() = default; \
+ \
+ protected: \
+ template <typename TFieldType, int kFieldOffset> \
+ friend class TaggedField; \
+ \
+ inline explicit Type(Address ptr); \
friend class TorqueGenerated##Type<Type, Super>;
#define TQ_OBJECT_CONSTRUCTORS_IMPL(Type) \
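To make the new DEF_GETTER shape concrete: a definition written as DEF_GETTER(HeapObject, IsCallable, bool) { return map(isolate).is_callable(); } (exactly the form used in objects-inl.h below) expands into the following pair, so every converted predicate gains an isolate-taking overload while the old isolate-less form survives and derives its isolate through GetIsolateForPtrCompr:

// Expansion of DEF_GETTER(HeapObject, IsCallable, bool) { ... } using the
// macro defined in this file.
bool HeapObject::IsCallable() const {
  Isolate* isolate = GetIsolateForPtrCompr(*this);
  return HeapObject::IsCallable(isolate);
}
bool HeapObject::IsCallable(Isolate* isolate) const {
  return map(isolate).is_callable();
}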
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 8626165647..51e380695e 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -19,6 +19,8 @@
#include "src/objects/js-weak-refs.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/objects/source-text-module.h"
+#include "src/objects/synthetic-module.h"
#include "src/objects/transitions.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -918,7 +920,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_GENERATOR_OBJECT_TYPE:
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
@@ -1043,6 +1045,9 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
} else if (type == WASM_CAPI_FUNCTION_DATA_TYPE) {
return Op::template apply<WasmCapiFunctionData::BodyDescriptor>(p1, p2,
p3, p4);
+ } else if (type == WASM_INDIRECT_FUNCTION_TABLE_TYPE) {
+ return Op::template apply<WasmIndirectFunctionTable::BodyDescriptor>(
+ p1, p2, p3, p4);
} else {
return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
}
@@ -1051,6 +1056,12 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
return Op::template apply<DataHandler::BodyDescriptor>(p1, p2, p3, p4);
+ case SOURCE_TEXT_MODULE_TYPE:
+ return Op::template apply<SourceTextModule::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case SYNTHETIC_MODULE_TYPE:
+ return Op::template apply<SyntheticModule::BodyDescriptor>(p1, p2, p3,
+ p4);
default:
PrintF("Unknown type: %d\n", type);
UNREACHABLE();
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 90824c68ef..b4c8591e5c 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -7,6 +7,8 @@
#include "src/init/heap-symbols.h"
+#include "torque-generated/instance-types-tq.h"
+
namespace v8 {
namespace internal {
@@ -31,7 +33,7 @@ namespace internal {
// HeapObject::Size, HeapObject::IterateBody, the typeof operator, and
// Object::IsString.
//
-// NOTE: Everything following JS_VALUE_TYPE is considered a
+// NOTE: Everything following JS_PRIMITIVE_WRAPPER_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
//
@@ -80,6 +82,7 @@ namespace internal {
V(ACCESSOR_PAIR_TYPE) \
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
V(ALLOCATION_MEMENTO_TYPE) \
+ V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
V(ASM_WASM_DATA_TYPE) \
V(ASYNC_GENERATOR_REQUEST_TYPE) \
V(CLASS_POSITIONS_TYPE) \
@@ -89,24 +92,23 @@ namespace internal {
V(FUNCTION_TEMPLATE_RARE_DATA_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
V(INTERPRETER_DATA_TYPE) \
- V(MODULE_INFO_ENTRY_TYPE) \
- V(MODULE_TYPE) \
V(OBJECT_TEMPLATE_INFO_TYPE) \
V(PROMISE_CAPABILITY_TYPE) \
V(PROMISE_REACTION_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
V(SCRIPT_TYPE) \
V(SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE) \
+ V(SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE) \
V(STACK_FRAME_INFO_TYPE) \
V(STACK_TRACE_FRAME_TYPE) \
V(TEMPLATE_OBJECT_DESCRIPTION_TYPE) \
V(TUPLE2_TYPE) \
V(TUPLE3_TYPE) \
- V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
V(WASM_CAPI_FUNCTION_DATA_TYPE) \
V(WASM_DEBUG_INFO_TYPE) \
V(WASM_EXCEPTION_TAG_TYPE) \
V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \
+ V(WASM_INDIRECT_FUNCTION_TABLE_TYPE) \
V(WASM_JS_FUNCTION_DATA_TYPE) \
\
V(CALLABLE_TASK_TYPE) \
@@ -116,6 +118,11 @@ namespace internal {
V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
V(FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE) \
\
+ TORQUE_DEFINED_INSTANCE_TYPES(V) \
+ \
+ V(SOURCE_TEXT_MODULE_TYPE) \
+ V(SYNTHETIC_MODULE_TYPE) \
+ \
V(ALLOCATION_SITE_TYPE) \
V(EMBEDDER_DATA_ARRAY_TYPE) \
\
@@ -174,7 +181,7 @@ namespace internal {
V(JS_GLOBAL_PROXY_TYPE) \
V(JS_MODULE_NAMESPACE_TYPE) \
V(JS_SPECIAL_API_OBJECT_TYPE) \
- V(JS_VALUE_TYPE) \
+ V(JS_PRIMITIVE_WRAPPER_TYPE) \
V(JS_API_OBJECT_TYPE) \
V(JS_OBJECT_TYPE) \
\
@@ -296,6 +303,8 @@ namespace internal {
V(_, ALIASED_ARGUMENTS_ENTRY_TYPE, AliasedArgumentsEntry, \
aliased_arguments_entry) \
V(_, ALLOCATION_MEMENTO_TYPE, AllocationMemento, allocation_memento) \
+ V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
+ array_boilerplate_description) \
V(_, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
async_generator_request) \
@@ -308,8 +317,6 @@ namespace internal {
function_template_rare_data) \
V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
- V(_, MODULE_INFO_ENTRY_TYPE, ModuleInfoEntry, module_info_entry) \
- V(_, MODULE_TYPE, Module, module) \
V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \
V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
@@ -317,20 +324,22 @@ namespace internal {
V(_, SCRIPT_TYPE, Script, script) \
V(_, SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE, \
SourcePositionTableWithFrameCache, source_position_table_with_frame_cache) \
+ V(_, SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, SourceTextModuleInfoEntry, \
+ module_info_entry) \
V(_, STACK_FRAME_INFO_TYPE, StackFrameInfo, stack_frame_info) \
V(_, STACK_TRACE_FRAME_TYPE, StackTraceFrame, stack_trace_frame) \
V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \
template_object_description) \
V(_, TUPLE2_TYPE, Tuple2, tuple2) \
V(_, TUPLE3_TYPE, Tuple3, tuple3) \
- V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
- array_boilerplate_description) \
V(_, WASM_CAPI_FUNCTION_DATA_TYPE, WasmCapiFunctionData, \
wasm_capi_function_data) \
V(_, WASM_DEBUG_INFO_TYPE, WasmDebugInfo, wasm_debug_info) \
V(_, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
wasm_exported_function_data) \
+ V(_, WASM_INDIRECT_FUNCTION_TABLE_TYPE, WasmIndirectFunctionTable, \
+ wasm_indirect_function_table) \
V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data) \
V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \
@@ -347,14 +356,18 @@ namespace internal {
#define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
// Produces (NAME, Name, name) entries.
-#define STRUCT_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V)
+#define STRUCT_LIST(V) \
+ STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V) \
+ TORQUE_STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V)
// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_MAPS_LIST entry
#define STRUCT_MAPS_LIST_ADAPTER(V, NAME, Name, name) \
V(Map, name##_map, Name##Map)
// Produces (Map, struct_name_map, StructNameMap) entries
-#define STRUCT_MAPS_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V)
+#define STRUCT_MAPS_LIST(V) \
+ STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V) \
+ TORQUE_STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V)
//
// The following macros define list of allocation size objects and list of
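The two adapters differ only in the triple they forward to the caller's V, so the Torque-generated struct list plugs into both STRUCT_LIST and STRUCT_MAPS_LIST without further glue. Taking the SourceTextModuleInfoEntry generator row added above as input, the expansions are:

// Worked expansion for the SourceTextModuleInfoEntry generator row.
// STRUCT_LIST(V) forwards the row unchanged via STRUCT_LIST_ADAPTER:
//   V(SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, SourceTextModuleInfoEntry, module_info_entry)
// STRUCT_MAPS_LIST(V) rewrites it into a map entry via STRUCT_MAPS_LIST_ADAPTER:
//   V(Map, module_info_entry_map, SourceTextModuleInfoEntryMap)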
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index ce92d64f2f..b6748401c0 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -15,11 +15,12 @@
#include "src/objects/objects.h"
#include "src/base/bits.h"
+#include "src/base/memory.h"
#include "src/builtins/builtins.h"
-#include "src/common/v8memory.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/read-only-heap-inl.h"
#include "src/numbers/conversions.h"
#include "src/numbers/double.h"
#include "src/objects/bigint.h"
@@ -37,6 +38,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi-inl.h"
+#include "src/objects/tagged-field-inl.h"
#include "src/objects/tagged-impl-inl.h"
#include "src/objects/templates.h"
#include "src/sanitizer/tsan.h"
@@ -64,30 +66,37 @@ int PropertyDetails::field_width_in_words() const {
return representation().IsDouble() ? kDoubleSize / kTaggedSize : 1;
}
-bool HeapObject::IsSloppyArgumentsElements() const {
- return IsFixedArrayExact();
+DEF_GETTER(HeapObject, IsSloppyArgumentsElements, bool) {
+ return IsFixedArrayExact(isolate);
}
-bool HeapObject::IsJSSloppyArgumentsObject() const {
- return IsJSArgumentsObject();
+DEF_GETTER(HeapObject, IsJSSloppyArgumentsObject, bool) {
+ return IsJSArgumentsObject(isolate);
}
-bool HeapObject::IsJSGeneratorObject() const {
- return map().instance_type() == JS_GENERATOR_OBJECT_TYPE ||
- IsJSAsyncFunctionObject() || IsJSAsyncGeneratorObject();
+DEF_GETTER(HeapObject, IsJSGeneratorObject, bool) {
+ return map(isolate).instance_type() == JS_GENERATOR_OBJECT_TYPE ||
+ IsJSAsyncFunctionObject(isolate) || IsJSAsyncGeneratorObject(isolate);
}
-bool HeapObject::IsDataHandler() const {
- return IsLoadHandler() || IsStoreHandler();
+DEF_GETTER(HeapObject, IsDataHandler, bool) {
+ return IsLoadHandler(isolate) || IsStoreHandler(isolate);
}
-bool HeapObject::IsClassBoilerplate() const { return IsFixedArrayExact(); }
+DEF_GETTER(HeapObject, IsClassBoilerplate, bool) {
+ return IsFixedArrayExact(isolate);
+}
-#define IS_TYPE_FUNCTION_DEF(type_) \
- bool Object::Is##type_() const { \
- return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
+#define IS_TYPE_FUNCTION_DEF(type_) \
+ bool Object::Is##type_() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
+ } \
+ bool Object::Is##type_(Isolate* isolate) const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##type_(isolate); \
}
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
+IS_TYPE_FUNCTION_DEF(HashTableBase)
+IS_TYPE_FUNCTION_DEF(SmallOrderedHashTable)
#undef IS_TYPE_FUNCTION_DEF
#define IS_TYPE_FUNCTION_DEF(Type, Value) \
@@ -140,109 +149,166 @@ bool HeapObject::IsNullOrUndefined() const {
return IsNullOrUndefined(GetReadOnlyRoots());
}
-bool HeapObject::IsUniqueName() const {
- return IsInternalizedString() || IsSymbol();
+DEF_GETTER(HeapObject, IsUniqueName, bool) {
+ return IsInternalizedString(isolate) || IsSymbol(isolate);
}
-bool HeapObject::IsFunction() const {
+DEF_GETTER(HeapObject, IsFunction, bool) {
STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- return map().instance_type() >= FIRST_FUNCTION_TYPE;
+ return map(isolate).instance_type() >= FIRST_FUNCTION_TYPE;
}
-bool HeapObject::IsCallable() const { return map().is_callable(); }
+DEF_GETTER(HeapObject, IsCallable, bool) { return map(isolate).is_callable(); }
-bool HeapObject::IsConstructor() const { return map().is_constructor(); }
+DEF_GETTER(HeapObject, IsCallableJSProxy, bool) {
+ return IsCallable(isolate) && IsJSProxy(isolate);
+}
-bool HeapObject::IsModuleInfo() const {
- return map() == GetReadOnlyRoots().module_info_map();
+DEF_GETTER(HeapObject, IsCallableApiObject, bool) {
+ InstanceType type = map(isolate).instance_type();
+ return IsCallable(isolate) &&
+ (type == JS_API_OBJECT_TYPE || type == JS_SPECIAL_API_OBJECT_TYPE);
}
-bool HeapObject::IsTemplateInfo() const {
- return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
+DEF_GETTER(HeapObject, IsNonNullForeign, bool) {
+ return IsForeign(isolate) &&
+ Foreign::cast(*this).foreign_address() != kNullAddress;
}
-bool HeapObject::IsConsString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsCons();
+DEF_GETTER(HeapObject, IsConstructor, bool) {
+ return map(isolate).is_constructor();
}
-bool HeapObject::IsThinString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsThin();
+DEF_GETTER(HeapObject, IsSourceTextModuleInfo, bool) {
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ return map(isolate) == GetReadOnlyRoots(isolate).module_info_map();
}
-bool HeapObject::IsSlicedString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsSliced();
+DEF_GETTER(HeapObject, IsTemplateInfo, bool) {
+ return IsObjectTemplateInfo(isolate) || IsFunctionTemplateInfo(isolate);
}
-bool HeapObject::IsSeqString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsSequential();
+DEF_GETTER(HeapObject, IsConsString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsCons();
}
-bool HeapObject::IsSeqOneByteString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsSequential() &&
- String::cast(*this).IsOneByteRepresentation();
+DEF_GETTER(HeapObject, IsThinString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsThin();
}
-bool HeapObject::IsSeqTwoByteString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsSequential() &&
- String::cast(*this).IsTwoByteRepresentation();
+DEF_GETTER(HeapObject, IsSlicedString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsSliced();
}
-bool HeapObject::IsExternalString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsExternal();
+DEF_GETTER(HeapObject, IsSeqString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsSequential();
}
-bool HeapObject::IsExternalOneByteString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsExternal() &&
- String::cast(*this).IsOneByteRepresentation();
+DEF_GETTER(HeapObject, IsSeqOneByteString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsSequential() &&
+ String::cast(*this).IsOneByteRepresentation(isolate);
}
-bool HeapObject::IsExternalTwoByteString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsExternal() &&
- String::cast(*this).IsTwoByteRepresentation();
+DEF_GETTER(HeapObject, IsSeqTwoByteString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsSequential() &&
+ String::cast(*this).IsTwoByteRepresentation(isolate);
}
-bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }
+DEF_GETTER(HeapObject, IsExternalString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsExternal();
+}
-bool Object::IsNumeric() const { return IsNumber() || IsBigInt(); }
+DEF_GETTER(HeapObject, IsExternalOneByteString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsExternal() &&
+ String::cast(*this).IsOneByteRepresentation(isolate);
+}
-bool HeapObject::IsFiller() const {
- InstanceType instance_type = map().instance_type();
+DEF_GETTER(HeapObject, IsExternalTwoByteString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsExternal() &&
+ String::cast(*this).IsTwoByteRepresentation(isolate);
+}
+
+bool Object::IsNumber() const {
+ if (IsSmi()) return true;
+ HeapObject this_heap_object = HeapObject::cast(*this);
+ Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ return this_heap_object.IsHeapNumber(isolate);
+}
+
+bool Object::IsNumber(Isolate* isolate) const {
+ return IsSmi() || IsHeapNumber(isolate);
+}
+
+bool Object::IsNumeric() const {
+ if (IsSmi()) return true;
+ HeapObject this_heap_object = HeapObject::cast(*this);
+ Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ return this_heap_object.IsHeapNumber(isolate) ||
+ this_heap_object.IsBigInt(isolate);
+}
+
+bool Object::IsNumeric(Isolate* isolate) const {
+ return IsNumber(isolate) || IsBigInt(isolate);
+}
+
+DEF_GETTER(HeapObject, IsFiller, bool) {
+ InstanceType instance_type = map(isolate).instance_type();
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
-bool HeapObject::IsJSWeakCollection() const {
- return IsJSWeakMap() || IsJSWeakSet();
+DEF_GETTER(HeapObject, IsJSWeakCollection, bool) {
+ return IsJSWeakMap(isolate) || IsJSWeakSet(isolate);
+}
+
+DEF_GETTER(HeapObject, IsJSCollection, bool) {
+ return IsJSMap(isolate) || IsJSSet(isolate);
}
-bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
+DEF_GETTER(HeapObject, IsPromiseReactionJobTask, bool) {
+ return IsPromiseFulfillReactionJobTask(isolate) ||
+ IsPromiseRejectReactionJobTask(isolate);
+}
-bool HeapObject::IsPromiseReactionJobTask() const {
- return IsPromiseFulfillReactionJobTask() || IsPromiseRejectReactionJobTask();
+DEF_GETTER(HeapObject, IsFrameArray, bool) {
+ return IsFixedArrayExact(isolate);
}
-bool HeapObject::IsFrameArray() const { return IsFixedArrayExact(); }
+DEF_GETTER(HeapObject, IsArrayList, bool) {
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ ReadOnlyRoots roots = GetReadOnlyRoots(isolate);
+ return *this == roots.empty_fixed_array() ||
+ map(isolate) == roots.array_list_map();
+}
-bool HeapObject::IsArrayList() const {
- return map() == GetReadOnlyRoots().array_list_map() ||
- *this == GetReadOnlyRoots().empty_fixed_array();
+DEF_GETTER(HeapObject, IsRegExpMatchInfo, bool) {
+ return IsFixedArrayExact(isolate);
}
-bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArrayExact(); }
+bool Object::IsLayoutDescriptor() const {
+ if (IsSmi()) return true;
+ HeapObject this_heap_object = HeapObject::cast(*this);
+ Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ return this_heap_object.IsByteArray(isolate);
+}
-bool Object::IsLayoutDescriptor() const { return IsSmi() || IsByteArray(); }
+bool Object::IsLayoutDescriptor(Isolate* isolate) const {
+ return IsSmi() || IsByteArray(isolate);
+}
-bool HeapObject::IsDeoptimizationData() const {
+DEF_GETTER(HeapObject, IsDeoptimizationData, bool) {
// Must be a fixed array.
- if (!IsFixedArrayExact()) return false;
+ if (!IsFixedArrayExact(isolate)) return false;
// There's no sure way to detect the difference between a fixed array and
// a deoptimization data array. Since this is used for asserts we can
@@ -255,79 +321,98 @@ bool HeapObject::IsDeoptimizationData() const {
return length >= 0 && length % DeoptimizationData::kDeoptEntrySize == 0;
}
-bool HeapObject::IsHandlerTable() const {
- if (!IsFixedArrayExact()) return false;
+DEF_GETTER(HeapObject, IsHandlerTable, bool) {
+ if (!IsFixedArrayExact(isolate)) return false;
// There's actually no way to see the difference between a fixed array and
// a handler table array.
return true;
}
-bool HeapObject::IsTemplateList() const {
- if (!IsFixedArrayExact()) return false;
+DEF_GETTER(HeapObject, IsTemplateList, bool) {
+ if (!IsFixedArrayExact(isolate)) return false;
// There's actually no way to see the difference between a fixed array and
// a template list.
if (FixedArray::cast(*this).length() < 1) return false;
return true;
}
-bool HeapObject::IsDependentCode() const {
- if (!IsWeakFixedArray()) return false;
+DEF_GETTER(HeapObject, IsDependentCode, bool) {
+ if (!IsWeakFixedArray(isolate)) return false;
// There's actually no way to see the difference between a weak fixed array
// and a dependent codes array.
return true;
}
-bool HeapObject::IsAbstractCode() const {
- return IsBytecodeArray() || IsCode();
+DEF_GETTER(HeapObject, IsAbstractCode, bool) {
+ return IsBytecodeArray(isolate) || IsCode(isolate);
}
-bool HeapObject::IsStringWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsString();
+DEF_GETTER(HeapObject, IsStringWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsString(isolate);
}
-bool HeapObject::IsBooleanWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsBoolean();
+DEF_GETTER(HeapObject, IsBooleanWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsBoolean(isolate);
}
-bool HeapObject::IsScriptWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsScript();
+DEF_GETTER(HeapObject, IsScriptWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsScript(isolate);
}
-bool HeapObject::IsNumberWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsNumber();
+DEF_GETTER(HeapObject, IsNumberWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsNumber(isolate);
}
-bool HeapObject::IsBigIntWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsBigInt();
+DEF_GETTER(HeapObject, IsBigIntWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsBigInt(isolate);
}
-bool HeapObject::IsSymbolWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsSymbol();
+DEF_GETTER(HeapObject, IsSymbolWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsSymbol(isolate);
}
-bool HeapObject::IsJSArrayBufferView() const {
- return IsJSDataView() || IsJSTypedArray();
+DEF_GETTER(HeapObject, IsJSArrayBufferView, bool) {
+ return IsJSDataView(isolate) || IsJSTypedArray(isolate);
}
-bool HeapObject::IsStringSet() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsJSCollectionIterator, bool) {
+ return IsJSMapIterator(isolate) || IsJSSetIterator(isolate);
+}
-bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsStringSet, bool) { return IsHashTable(isolate); }
-bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsObjectHashSet, bool) { return IsHashTable(isolate); }
-bool HeapObject::IsMapCache() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsCompilationCacheTable, bool) {
+ return IsHashTable(isolate);
+}
-bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsMapCache, bool) { return IsHashTable(isolate); }
-bool Object::IsHashTableBase() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsObjectHashTable, bool) { return IsHashTable(isolate); }
-bool Object::IsSmallOrderedHashTable() const {
- return IsSmallOrderedHashSet() || IsSmallOrderedHashMap() ||
- IsSmallOrderedNameDictionary();
+DEF_GETTER(HeapObject, IsHashTableBase, bool) { return IsHashTable(isolate); }
+
+DEF_GETTER(HeapObject, IsSmallOrderedHashTable, bool) {
+ return IsSmallOrderedHashSet(isolate) || IsSmallOrderedHashMap(isolate) ||
+ IsSmallOrderedNameDictionary(isolate);
}
bool Object::IsPrimitive() const {
- return IsSmi() || HeapObject::cast(*this).map().IsPrimitiveMap();
+ if (IsSmi()) return true;
+ HeapObject this_heap_object = HeapObject::cast(*this);
+ Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ return this_heap_object.map(isolate).IsPrimitiveMap();
+}
+
+bool Object::IsPrimitive(Isolate* isolate) const {
+ return IsSmi() || HeapObject::cast(*this).map(isolate).IsPrimitiveMap();
}
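
Illustrative aside (not part of the patch): the hunks above split each type predicate into a no-argument flavour and an Isolate*-taking flavour, recovering the isolate only when a heap object actually has to be inspected. A minimal standalone C++ sketch of that shape, using stand-in types rather than the real V8 classes:

#include <iostream>

struct Isolate {};  // stand-in for v8::internal::Isolate

struct Value {
  bool is_smi;             // value encoded inline, no heap access needed
  bool has_primitive_map;  // stands in for "map().IsPrimitiveMap()"
  Isolate* home;           // assumption: the isolate is recoverable from the object

  // Isolate-taking flavour: callers that already hold an isolate skip the lookup.
  bool IsPrimitive(Isolate* /*isolate*/) const {
    return is_smi || has_primitive_map;
  }

  // No-argument flavour: only recover the isolate when the Smi fast path fails.
  bool IsPrimitive() const {
    if (is_smi) return true;
    Isolate* isolate = GetIsolateForPtrCompr();
    return IsPrimitive(isolate);
  }

 private:
  Isolate* GetIsolateForPtrCompr() const { return home; }  // simplified stand-in
};

int main() {
  Isolate iso;
  Value smi{true, false, &iso};
  Value obj{false, true, &iso};
  std::cout << smi.IsPrimitive() << " " << obj.IsPrimitive(&iso) << "\n";  // 1 1
}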
// static
@@ -339,19 +424,21 @@ Maybe<bool> Object::IsArray(Handle<Object> object) {
return JSProxy::IsArray(Handle<JSProxy>::cast(object));
}
-bool HeapObject::IsUndetectable() const { return map().is_undetectable(); }
+DEF_GETTER(HeapObject, IsUndetectable, bool) {
+ return map(isolate).is_undetectable();
+}
-bool HeapObject::IsAccessCheckNeeded() const {
- if (IsJSGlobalProxy()) {
+DEF_GETTER(HeapObject, IsAccessCheckNeeded, bool) {
+ if (IsJSGlobalProxy(isolate)) {
const JSGlobalProxy proxy = JSGlobalProxy::cast(*this);
JSGlobalObject global = proxy.GetIsolate()->context().global_object();
return proxy.IsDetachedFrom(global);
}
- return map().is_access_check_needed();
+ return map(isolate).is_access_check_needed();
}
-bool HeapObject::IsStruct() const {
- switch (map().instance_type()) {
+DEF_GETTER(HeapObject, IsStruct, bool) {
+ switch (map(isolate).instance_type()) {
#define MAKE_STRUCT_CASE(TYPE, Name, name) \
case TYPE: \
return true;
@@ -374,10 +461,13 @@ bool HeapObject::IsStruct() const {
}
}
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
- bool Object::Is##Name() const { \
- return IsHeapObject() && HeapObject::cast(*this).Is##Name(); \
- } \
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
+ bool Object::Is##Name() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##Name(); \
+ } \
+ bool Object::Is##Name(Isolate* isolate) const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##Name(isolate); \
+ } \
TYPE_CHECKER(Name)
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
@@ -441,25 +531,28 @@ bool Object::FilterKey(PropertyFilter filter) {
return false;
}
-Representation Object::OptimalRepresentation() {
+Representation Object::OptimalRepresentation(Isolate* isolate) const {
if (!FLAG_track_fields) return Representation::Tagged();
if (IsSmi()) {
return Representation::Smi();
- } else if (FLAG_track_double_fields && IsHeapNumber()) {
+ }
+ HeapObject heap_object = HeapObject::cast(*this);
+ if (FLAG_track_double_fields && heap_object.IsHeapNumber(isolate)) {
return Representation::Double();
- } else if (FLAG_track_computed_fields && IsUninitialized()) {
+ } else if (FLAG_track_computed_fields &&
+ heap_object.IsUninitialized(
+ heap_object.GetReadOnlyRoots(isolate))) {
return Representation::None();
} else if (FLAG_track_heap_object_fields) {
- DCHECK(IsHeapObject());
return Representation::HeapObject();
} else {
return Representation::Tagged();
}
}
-ElementsKind Object::OptimalElementsKind() {
+ElementsKind Object::OptimalElementsKind(Isolate* isolate) const {
if (IsSmi()) return PACKED_SMI_ELEMENTS;
- if (IsNumber()) return PACKED_DOUBLE_ELEMENTS;
+ if (IsNumber(isolate)) return PACKED_DOUBLE_ELEMENTS;
return PACKED_ELEMENTS;
}
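
Illustrative aside (not part of the patch): OptimalElementsKind above picks the cheapest backing store a value can live in. A rough standalone sketch of the same decision with stand-in types (the real code distinguishes Smi, HeapNumber and everything else):

#include <cstdint>
#include <iostream>
#include <variant>

enum class ElementsKind { PACKED_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS, PACKED_ELEMENTS };

// Stand-in for a tagged value: small integer, other number, or arbitrary object.
using Value = std::variant<std::int32_t, double, const char*>;

ElementsKind OptimalElementsKind(const Value& v) {
  if (std::holds_alternative<std::int32_t>(v)) return ElementsKind::PACKED_SMI_ELEMENTS;
  if (std::holds_alternative<double>(v)) return ElementsKind::PACKED_DOUBLE_ELEMENTS;
  return ElementsKind::PACKED_ELEMENTS;
}

int main() {
  std::cout << static_cast<int>(OptimalElementsKind(Value{std::int32_t{1}})) << " "
            << static_cast<int>(OptimalElementsKind(Value{1.5})) << " "
            << static_cast<int>(OptimalElementsKind(Value{"str"})) << "\n";  // 0 1 2
}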
@@ -618,18 +711,18 @@ HeapObject MapWord::ToForwardingAddress() {
#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(Isolate* isolate, int offset) {
- VerifyPointer(isolate, READ_FIELD(*this, offset));
+ VerifyPointer(isolate, TaggedField<Object>::load(isolate, *this, offset));
STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
}
void HeapObject::VerifyMaybeObjectField(Isolate* isolate, int offset) {
- MaybeObject::VerifyMaybeObjectPointer(isolate,
- READ_WEAK_FIELD(*this, offset));
+ MaybeObject::VerifyMaybeObjectPointer(
+ isolate, TaggedField<MaybeObject>::load(isolate, *this, offset));
STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
}
void HeapObject::VerifySmiField(int offset) {
- CHECK(READ_FIELD(*this, offset).IsSmi());
+ CHECK(TaggedField<Object>::load(*this, offset).IsSmi());
STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
}
@@ -639,7 +732,15 @@ ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
return ReadOnlyHeap::GetReadOnlyRoots(*this);
}
-Map HeapObject::map() const { return map_word().ToMap(); }
+ReadOnlyRoots HeapObject::GetReadOnlyRoots(Isolate* isolate) const {
+#ifdef V8_COMPRESS_POINTERS
+ return ReadOnlyRoots(isolate);
+#else
+ return GetReadOnlyRoots();
+#endif
+}
+
+DEF_GETTER(HeapObject, map, Map) { return map_word(isolate).ToMap(); }
void HeapObject::set_map(Map value) {
if (!value.is_null()) {
@@ -655,8 +756,8 @@ void HeapObject::set_map(Map value) {
}
}
-Map HeapObject::synchronized_map() const {
- return synchronized_map_word().ToMap();
+DEF_GETTER(HeapObject, synchronized_map, Map) {
+ return synchronized_map_word(isolate).ToMap();
}
void HeapObject::synchronized_set_map(Map value) {
@@ -693,24 +794,31 @@ void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
}
}
-MapWordSlot HeapObject::map_slot() const {
- return MapWordSlot(FIELD_ADDR(*this, kMapOffset));
+ObjectSlot HeapObject::map_slot() const {
+ return ObjectSlot(MapField::address(*this));
}
-MapWord HeapObject::map_word() const {
- return MapWord(map_slot().Relaxed_Load().ptr());
+DEF_GETTER(HeapObject, map_word, MapWord) {
+ return MapField::Relaxed_Load(isolate, *this);
}
void HeapObject::set_map_word(MapWord map_word) {
- map_slot().Relaxed_Store(Object(map_word.value_));
+ MapField::Relaxed_Store(*this, map_word);
}
-MapWord HeapObject::synchronized_map_word() const {
- return MapWord(map_slot().Acquire_Load().ptr());
+DEF_GETTER(HeapObject, synchronized_map_word, MapWord) {
+ return MapField::Acquire_Load(isolate, *this);
}
void HeapObject::synchronized_set_map_word(MapWord map_word) {
- map_slot().Release_Store(Object(map_word.value_));
+ MapField::Release_Store(*this, map_word);
+}
+
+bool HeapObject::synchronized_compare_and_swap_map_word(MapWord old_map_word,
+ MapWord new_map_word) {
+ Tagged_t result =
+ MapField::Release_CompareAndSwap(*this, old_map_word, new_map_word);
+ return result == static_cast<Tagged_t>(old_map_word.ptr());
}
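
Illustrative aside (not part of the patch): the new synchronized_compare_and_swap_map_word succeeds only when the stored map word still equals the expected old value. A standalone sketch of the same idea with std::atomic; here the success check is folded into compare_exchange_strong's return value, whereas the hunk compares the returned old Tagged_t explicitly:

#include <atomic>
#include <cstdint>
#include <iostream>

using MapWord = std::uintptr_t;  // stand-in for the real MapWord wrapper

struct HeapObjectStub {
  std::atomic<MapWord> map_word{0x1000};

  bool synchronized_compare_and_swap_map_word(MapWord old_word, MapWord new_word) {
    MapWord expected = old_word;
    // Release ordering on success mirrors the Release_CompareAndSwap above.
    return map_word.compare_exchange_strong(expected, new_word,
                                            std::memory_order_release,
                                            std::memory_order_relaxed);
  }
};

int main() {
  HeapObjectStub o;
  std::cout << o.synchronized_compare_and_swap_map_word(0x1000, 0x2000) << "\n";  // 1
  std::cout << o.synchronized_compare_and_swap_map_word(0x1000, 0x3000) << "\n";  // 0, word is now 0x2000
}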
int HeapObject::Size() const { return SizeFromMap(map()); }
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 8cc22fa0e5..9963cba472 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -25,13 +25,13 @@
#include "src/builtins/builtins.h"
#include "src/codegen/compiler.h"
#include "src/common/globals.h"
+#include "src/common/message-template.h"
#include "src/date/date.h"
#include "src/debug/debug.h"
#include "src/execution/arguments.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/execution/microtask-queue.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
@@ -104,7 +104,7 @@
#include "src/objects/template-objects-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/parsing/preparse-data.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/strings/string-builder-inl.h"
#include "src/strings/string-search.h"
#include "src/strings/string-stream.h"
@@ -116,6 +116,9 @@
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
+#include "torque-generated/class-definitions-tq-inl.h"
+#include "torque-generated/internal-class-definitions-tq-inl.h"
+
namespace v8 {
namespace internal {
@@ -209,8 +212,8 @@ Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
DCHECK(object->FitsRepresentation(representation));
return object;
}
- return isolate->factory()->NewHeapNumber(
- MutableHeapNumber::cast(*object).value());
+ return isolate->factory()->NewHeapNumberFromBits(
+ MutableHeapNumber::cast(*object).value_as_bits());
}
MaybeHandle<JSReceiver> Object::ToObjectImpl(Isolate* isolate,
@@ -242,7 +245,7 @@ MaybeHandle<JSReceiver> Object::ToObjectImpl(Isolate* isolate,
isolate);
}
Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
- Handle<JSValue>::cast(result)->set_value(*object);
+ Handle<JSPrimitiveWrapper>::cast(result)->set_value(*object);
return result;
}
@@ -2387,9 +2390,9 @@ void DescriptorArray::GeneralizeAllFields() {
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
details = details.CopyWithConstness(PropertyConstness::kMutable);
- SetValue(i, FieldType::Any());
+ SetValue(i, MaybeObject::FromObject(FieldType::Any()));
}
- set(ToDetailsIndex(i), MaybeObject::FromObject(details.AsSmi()));
+ SetDetails(i, details);
}
}
@@ -3043,27 +3046,34 @@ Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
}
// Enforce the invariant.
+ return JSProxy::CheckDeleteTrap(isolate, name, target);
+}
+
+Maybe<bool> JSProxy::CheckDeleteTrap(Isolate* isolate, Handle<Name> name,
+ Handle<JSReceiver> target) {
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
PropertyDescriptor target_desc;
- Maybe<bool> owned =
+ Maybe<bool> target_found =
JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
- MAYBE_RETURN(owned, Nothing<bool>());
- if (owned.FromJust()) {
+ MAYBE_RETURN(target_found, Nothing<bool>());
+ // 11. If targetDesc is undefined, return true.
+ if (target_found.FromJust()) {
+ // 12. If targetDesc.[[Configurable]] is false, throw a TypeError exception.
if (!target_desc.configurable()) {
- isolate->Throw(*factory->NewTypeError(
+ isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kProxyDeletePropertyNonConfigurable, name));
return Nothing<bool>();
}
// 13. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
// 14. If extensibleTarget is false, throw a TypeError exception.
- Maybe<bool> extensible = JSReceiver::IsExtensible(target);
- MAYBE_RETURN(extensible, Nothing<bool>());
- if (!extensible.FromJust()) {
- isolate->Throw(*factory->NewTypeError(
+ if (!extensible_target.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kProxyDeletePropertyNonExtensible, name));
return Nothing<bool>();
}
}
-
return Just(true);
}
@@ -3269,7 +3279,11 @@ Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
new_len_desc, should_throw);
}
// 13. If oldLenDesc.[[Writable]] is false, return false.
- if (!old_len_desc.writable()) {
+ if (!old_len_desc.writable() ||
+ // Also handle the {configurable: true} case since we later use
+ // JSArray::SetLength instead of OrdinaryDefineOwnProperty to change
+ // the length, and it doesn't have access to the descriptor anymore.
+ new_len_desc->configurable()) {
RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed,
isolate->factory()->length_string()));
@@ -4294,8 +4308,10 @@ bool DescriptorArray::IsEqualTo(DescriptorArray other) {
if (number_of_all_descriptors() != other.number_of_all_descriptors()) {
return false;
}
- for (int i = 0; i < number_of_all_descriptors(); ++i) {
- if (get(i) != other.get(i)) return false;
+ for (int i = 0; i < number_of_descriptors(); ++i) {
+ if (GetKey(i) != other.GetKey(i)) return false;
+ if (GetDetails(i).AsSmi() != other.GetDetails(i).AsSmi()) return false;
+ if (GetValue(i) != other.GetValue(i)) return false;
}
return true;
}
@@ -4500,7 +4516,8 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
return value;
}
-Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
+Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
+ Handle<Context> native_context,
Handle<Map> initial_map) {
// Replace all of the cached initial array maps in the native context with
// the appropriate transitioned elements kind maps.
@@ -4512,13 +4529,12 @@ Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
i < kFastElementsKindCount; ++i) {
Handle<Map> new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- Map maybe_elements_transition = current_map->ElementsTransitionMap();
+ Map maybe_elements_transition = current_map->ElementsTransitionMap(isolate);
if (!maybe_elements_transition.is_null()) {
- new_map = handle(maybe_elements_transition, native_context->GetIsolate());
+ new_map = handle(maybe_elements_transition, isolate);
} else {
- new_map =
- Map::CopyAsElementsKind(native_context->GetIsolate(), current_map,
- next_kind, INSERT_TRANSITION);
+ new_map = Map::CopyAsElementsKind(isolate, current_map, next_kind,
+ INSERT_TRANSITION);
}
DCHECK_EQ(next_kind, new_map->elements_kind());
native_context->set(Context::ArrayMapIndex(next_kind), *new_map);
@@ -4855,22 +4871,12 @@ std::unique_ptr<v8::tracing::TracedValue> SharedFunctionInfo::ToTracedValue(
const char* SharedFunctionInfo::kTraceScope =
"v8::internal::SharedFunctionInfo";
-uint64_t SharedFunctionInfo::TraceID() const {
- // TODO(bmeurer): We use a combination of Script ID and function literal
- // ID (within the Script) to uniquely identify SharedFunctionInfos. This
- // can add significant overhead, and we should probably find a better way
- // to uniquely identify SharedFunctionInfos over time.
+uint64_t SharedFunctionInfo::TraceID(FunctionLiteral* literal) const {
+ int literal_id =
+ literal ? literal->function_literal_id() : function_literal_id();
Script script = Script::cast(this->script());
- WeakFixedArray script_functions = script.shared_function_infos();
- for (int i = 0; i < script_functions.length(); ++i) {
- HeapObject script_function;
- if (script_functions.Get(i).GetHeapObjectIfWeak(&script_function) &&
- script_function.address() == address()) {
- return (static_cast<uint64_t>(script.id() + 1) << 32) |
- (static_cast<uint64_t>(i));
- }
- }
- UNREACHABLE();
+ return (static_cast<uint64_t>(script.id() + 1) << 32) |
+ (static_cast<uint64_t>(literal_id));
}
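
Illustrative aside (not part of the patch): the rewritten TraceID packs the script id (plus one) into the upper 32 bits and the function literal id into the lower 32 bits, so the old linear search over shared_function_infos is no longer needed. A worked example with made-up ids:

#include <cstdint>
#include <iostream>

std::uint64_t PackTraceId(std::int32_t script_id, std::int32_t literal_id) {
  return (static_cast<std::uint64_t>(script_id + 1) << 32) |
         static_cast<std::uint64_t>(literal_id);
}

int main() {
  // script id 5, literal id 3 -> upper half 6, lower half 3.
  std::cout << std::hex << PackTraceId(5, 3) << "\n";  // prints 600000003
}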
std::unique_ptr<v8::tracing::TracedValue> SharedFunctionInfo::TraceIDRef()
@@ -4946,21 +4952,17 @@ WasmCapiFunctionData SharedFunctionInfo::wasm_capi_function_data() const {
SharedFunctionInfo::ScriptIterator::ScriptIterator(Isolate* isolate,
Script script)
- : ScriptIterator(isolate, handle(script.shared_function_infos(), isolate)) {
-}
+ : ScriptIterator(handle(script.shared_function_infos(), isolate)) {}
SharedFunctionInfo::ScriptIterator::ScriptIterator(
- Isolate* isolate, Handle<WeakFixedArray> shared_function_infos)
- : isolate_(isolate),
- shared_function_infos_(shared_function_infos),
- index_(0) {}
+ Handle<WeakFixedArray> shared_function_infos)
+ : shared_function_infos_(shared_function_infos), index_(0) {}
SharedFunctionInfo SharedFunctionInfo::ScriptIterator::Next() {
while (index_ < shared_function_infos_->length()) {
MaybeObject raw = shared_function_infos_->Get(index_++);
HeapObject heap_object;
- if (!raw->GetHeapObject(&heap_object) ||
- heap_object.IsUndefined(isolate_)) {
+ if (!raw->GetHeapObject(&heap_object) || heap_object.IsUndefined()) {
continue;
}
return SharedFunctionInfo::cast(heap_object);
@@ -4968,13 +4970,15 @@ SharedFunctionInfo SharedFunctionInfo::ScriptIterator::Next() {
return SharedFunctionInfo();
}
-void SharedFunctionInfo::ScriptIterator::Reset(Script script) {
- shared_function_infos_ = handle(script.shared_function_infos(), isolate_);
+void SharedFunctionInfo::ScriptIterator::Reset(Isolate* isolate,
+ Script script) {
+ shared_function_infos_ = handle(script.shared_function_infos(), isolate);
index_ = 0;
}
SharedFunctionInfo::GlobalIterator::GlobalIterator(Isolate* isolate)
- : script_iterator_(isolate),
+ : isolate_(isolate),
+ script_iterator_(isolate),
noscript_sfi_iterator_(isolate->heap()->noscript_shared_function_infos()),
sfi_iterator_(isolate, script_iterator_.Next()) {}
@@ -4986,7 +4990,7 @@ SharedFunctionInfo SharedFunctionInfo::GlobalIterator::Next() {
if (!next.is_null()) return SharedFunctionInfo::cast(next);
Script next_script = script_iterator_.Next();
if (next_script.is_null()) return SharedFunctionInfo();
- sfi_iterator_.Reset(next_script);
+ sfi_iterator_.Reset(isolate_, next_script);
}
}
@@ -5148,7 +5152,6 @@ void SharedFunctionInfo::DiscardCompiled(
handle(shared_info->inferred_name(), isolate);
int start_position = shared_info->StartPosition();
int end_position = shared_info->EndPosition();
- int function_literal_id = shared_info->FunctionLiteralId(isolate);
shared_info->DiscardCompiledMetadata(isolate);
@@ -5163,8 +5166,7 @@ void SharedFunctionInfo::DiscardCompiled(
// validity checks, since we're performing the unusual task of decompiling.
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithoutPreparseData(
- inferred_name_val, start_position, end_position,
- function_literal_id);
+ inferred_name_val, start_position, end_position);
shared_info->set_function_data(*data);
}
}
@@ -5273,28 +5275,6 @@ bool SharedFunctionInfo::IsInlineable() {
int SharedFunctionInfo::SourceSize() { return EndPosition() - StartPosition(); }
-int SharedFunctionInfo::FindIndexInScript(Isolate* isolate) const {
- DisallowHeapAllocation no_gc;
-
- Object script_obj = script();
- if (!script_obj.IsScript()) return kFunctionLiteralIdInvalid;
-
- WeakFixedArray shared_info_list =
- Script::cast(script_obj).shared_function_infos();
- SharedFunctionInfo::ScriptIterator iterator(
- isolate,
- Handle<WeakFixedArray>(reinterpret_cast<Address*>(&shared_info_list)));
-
- for (SharedFunctionInfo shared = iterator.Next(); !shared.is_null();
- shared = iterator.Next()) {
- if (shared == *this) {
- return iterator.CurrentIndex();
- }
- }
-
- return kFunctionLiteralIdInvalid;
-}
-
// Output the source code without any allocation in the heap.
std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
const SharedFunctionInfo s = v.value;
@@ -5365,6 +5345,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
shared_info->set_language_mode(lit->language_mode());
shared_info->set_is_wrapped(lit->is_wrapped());
+ shared_info->set_function_literal_id(lit->function_literal_id());
// shared_info->set_kind(lit->kind());
// FunctionKind must have already been set.
DCHECK(lit->kind() == shared_info->kind());
@@ -5409,7 +5390,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithPreparseData(
lit->inferred_name(), lit->start_position(), lit->end_position(),
- lit->function_literal_id(), preparse_data);
+ preparse_data);
shared_info->set_uncompiled_data(*data);
needs_position_info = false;
}
@@ -5418,8 +5399,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
if (needs_position_info) {
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithoutPreparseData(
- lit->inferred_name(), lit->start_position(), lit->end_position(),
- lit->function_literal_id());
+ lit->inferred_name(), lit->start_position(), lit->end_position());
shared_info->set_uncompiled_data(*data);
}
}
@@ -5510,21 +5490,6 @@ int SharedFunctionInfo::EndPosition() const {
return kNoSourcePosition;
}
-int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
- // Fast path for the common case when the SFI is uncompiled and so the
- // function literal id is already in the uncompiled data.
- if (HasUncompiledData() && uncompiled_data().has_function_literal_id()) {
- int id = uncompiled_data().function_literal_id();
- // Make sure the id is what we should have found with the slow path.
- DCHECK_EQ(id, FindIndexInScript(isolate));
- return id;
- }
-
- // Otherwise, search for the function in the SFI's script's function list,
- // and return its index in that list.
- return FindIndexInScript(isolate);
-}
-
void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
Object maybe_scope_info = name_or_scope_info();
if (maybe_scope_info.IsScopeInfo()) {
@@ -5561,16 +5526,6 @@ void SharedFunctionInfo::EnsureSourcePositionsAvailable(
}
}
-bool BytecodeArray::IsBytecodeEqual(const BytecodeArray other) const {
- if (length() != other.length()) return false;
-
- for (int i = 0; i < length(); ++i) {
- if (get(i) != other.get(i)) return false;
- }
-
- return true;
-}
-
// static
void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
DCHECK_GE(capacity, 0);
@@ -6128,42 +6083,14 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
namespace {
-constexpr JSRegExp::Flag kCharFlagValues[] = {
- JSRegExp::kGlobal, // g
- JSRegExp::kInvalid, // h
- JSRegExp::kIgnoreCase, // i
- JSRegExp::kInvalid, // j
- JSRegExp::kInvalid, // k
- JSRegExp::kInvalid, // l
- JSRegExp::kMultiline, // m
- JSRegExp::kInvalid, // n
- JSRegExp::kInvalid, // o
- JSRegExp::kInvalid, // p
- JSRegExp::kInvalid, // q
- JSRegExp::kInvalid, // r
- JSRegExp::kDotAll, // s
- JSRegExp::kInvalid, // t
- JSRegExp::kUnicode, // u
- JSRegExp::kInvalid, // v
- JSRegExp::kInvalid, // w
- JSRegExp::kInvalid, // x
- JSRegExp::kSticky, // y
-};
-
-constexpr JSRegExp::Flag CharToFlag(uc16 flag_char) {
- return (flag_char < 'g' || flag_char > 'y')
- ? JSRegExp::kInvalid
- : kCharFlagValues[flag_char - 'g'];
-}
-
JSRegExp::Flags RegExpFlagsFromString(Isolate* isolate, Handle<String> flags,
bool* success) {
- STATIC_ASSERT(CharToFlag('g') == JSRegExp::kGlobal);
- STATIC_ASSERT(CharToFlag('i') == JSRegExp::kIgnoreCase);
- STATIC_ASSERT(CharToFlag('m') == JSRegExp::kMultiline);
- STATIC_ASSERT(CharToFlag('s') == JSRegExp::kDotAll);
- STATIC_ASSERT(CharToFlag('u') == JSRegExp::kUnicode);
- STATIC_ASSERT(CharToFlag('y') == JSRegExp::kSticky);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('g') == JSRegExp::kGlobal);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('i') == JSRegExp::kIgnoreCase);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('m') == JSRegExp::kMultiline);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('s') == JSRegExp::kDotAll);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('u') == JSRegExp::kUnicode);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('y') == JSRegExp::kSticky);
int length = flags->length();
if (length == 0) {
@@ -6171,14 +6098,14 @@ JSRegExp::Flags RegExpFlagsFromString(Isolate* isolate, Handle<String> flags,
return JSRegExp::kNone;
}
// A longer flags string cannot be valid.
- if (length > JSRegExp::FlagCount()) return JSRegExp::Flags(0);
+ if (length > JSRegExp::kFlagCount) return JSRegExp::Flags(0);
// Initialize {value} to {kInvalid} to allow 2-in-1 duplicate/invalid check.
JSRegExp::Flags value = JSRegExp::kInvalid;
if (flags->IsSeqOneByteString()) {
DisallowHeapAllocation no_gc;
SeqOneByteString seq_flags = SeqOneByteString::cast(*flags);
for (int i = 0; i < length; i++) {
- JSRegExp::Flag flag = CharToFlag(seq_flags.Get(i));
+ JSRegExp::Flag flag = JSRegExp::FlagFromChar(seq_flags.Get(i));
// Duplicate or invalid flag.
if (value & flag) return JSRegExp::Flags(0);
value |= flag;
@@ -6188,7 +6115,7 @@ JSRegExp::Flags RegExpFlagsFromString(Isolate* isolate, Handle<String> flags,
DisallowHeapAllocation no_gc;
String::FlatContent flags_content = flags->GetFlatContent(no_gc);
for (int i = 0; i < length; i++) {
- JSRegExp::Flag flag = CharToFlag(flags_content.Get(i));
+ JSRegExp::Flag flag = JSRegExp::FlagFromChar(flags_content.Get(i));
// Duplicate or invalid flag.
if (value & flag) return JSRegExp::Flags(0);
value |= flag;
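
Illustrative aside (not part of the patch): the flag parsing above relies on the "2-in-1" trick noted in the code: value starts out with only the invalid bit set, every valid flag character contributes its own bit, and any repeated or unknown character trips the `value & flag` test. A standalone sketch with made-up bit values (not the real JSRegExp constants):

#include <cstdint>
#include <iostream>

enum Flag : std::uint32_t {
  kNone = 0,
  kGlobal = 1 << 0,      // g
  kIgnoreCase = 1 << 1,  // i
  kMultiline = 1 << 2,   // m
  kSticky = 1 << 3,      // y
  kUnicode = 1 << 4,     // u
  kDotAll = 1 << 5,      // s
  kInvalid = 1 << 6,     // any other character
};

constexpr std::uint32_t FlagFromChar(char c) {
  return c == 'g' ? kGlobal : c == 'i' ? kIgnoreCase : c == 'm' ? kMultiline
       : c == 'y' ? kSticky : c == 'u' ? kUnicode   : c == 's' ? kDotAll
                                                                : kInvalid;
}

bool ParseFlags(const char* flags, std::uint32_t* out) {
  std::uint32_t value = kInvalid;  // pre-set so an unknown char looks like a duplicate
  for (const char* p = flags; *p != '\0'; ++p) {
    std::uint32_t flag = FlagFromChar(*p);
    if (value & flag) return false;  // duplicate or invalid flag
    value |= flag;
  }
  *out = value & ~static_cast<std::uint32_t>(kInvalid);
  return true;
}

int main() {
  std::uint32_t v = 0;
  std::cout << ParseFlags("gi", &v) << " " << ParseFlags("gg", &v) << " "
            << ParseFlags("gx", &v) << "\n";  // 1 0 0
}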
@@ -6224,15 +6151,20 @@ template <typename Char>
int CountRequiredEscapes(Handle<String> source) {
DisallowHeapAllocation no_gc;
int escapes = 0;
+ bool in_char_class = false;
Vector<const Char> src = source->GetCharVector<Char>(no_gc);
for (int i = 0; i < src.length(); i++) {
const Char c = src[i];
if (c == '\\') {
// Escape. Skip next character;
i++;
- } else if (c == '/') {
+ } else if (c == '/' && !in_char_class) {
// Not escaped forward-slash needs escape.
escapes++;
+ } else if (c == '[') {
+ in_char_class = true;
+ } else if (c == ']') {
+ in_char_class = false;
} else if (c == '\n') {
escapes++;
} else if (c == '\r') {
@@ -6245,6 +6177,7 @@ int CountRequiredEscapes(Handle<String> source) {
DCHECK(!unibrow::IsLineTerminator(static_cast<unibrow::uchar>(c)));
}
}
+ DCHECK(!in_char_class);
return escapes;
}
@@ -6262,16 +6195,19 @@ Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
Vector<Char> dst(result->GetChars(no_gc), result->length());
int s = 0;
int d = 0;
- // TODO(v8:1982): Fully implement
- // https://tc39.github.io/ecma262/#sec-escaperegexppattern
+ bool in_char_class = false;
while (s < src.length()) {
if (src[s] == '\\') {
// Escape. Copy this and next character.
dst[d++] = src[s++];
if (s == src.length()) break;
- } else if (src[s] == '/') {
+ } else if (src[s] == '/' && !in_char_class) {
// Not escaped forward-slash needs escape.
dst[d++] = '\\';
+ } else if (src[s] == '[') {
+ in_char_class = true;
+ } else if (src[s] == ']') {
+ in_char_class = false;
} else if (src[s] == '\n') {
WriteStringToCharVector(dst, &d, "\\n");
s++;
@@ -6292,6 +6228,7 @@ Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
dst[d++] = src[s++];
}
DCHECK_EQ(result->length(), d);
+ DCHECK(!in_char_class);
return result;
}
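
Illustrative aside (not part of the patch): the two hunks above teach the escape counting and the escape writing the same new rule: an unescaped '/' needs a backslash, except inside a character class. A standalone sketch of just that rule (line-terminator handling omitted):

#include <cstddef>
#include <iostream>
#include <string>

std::string EscapeSlashesOutsideCharClass(const std::string& src) {
  std::string out;
  bool in_char_class = false;
  for (std::size_t i = 0; i < src.size(); ++i) {
    const char c = src[i];
    if (c == '\\') {
      // Escape sequence: copy this character and the next one verbatim.
      out += c;
      if (++i < src.size()) out += src[i];
    } else if (c == '/' && !in_char_class) {
      out += "\\/";  // unescaped forward slash outside [...] gets escaped
    } else {
      if (c == '[') in_char_class = true;
      else if (c == ']') in_char_class = false;
      out += c;
    }
  }
  return out;
}

int main() {
  std::cout << EscapeSlashesOutsideCharClass("a/b[c/d]") << "\n";  // a\/b[c/d]
}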
@@ -6348,13 +6285,13 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
source = String::Flatten(isolate, source);
+ RETURN_ON_EXCEPTION(isolate, RegExp::Compile(isolate, regexp, source, flags),
+ JSRegExp);
+
Handle<String> escaped_source;
ASSIGN_RETURN_ON_EXCEPTION(isolate, escaped_source,
EscapeRegExpSource(isolate, source), JSRegExp);
- RETURN_ON_EXCEPTION(
- isolate, RegExpImpl::Compile(isolate, regexp, source, flags), JSRegExp);
-
regexp->set_source(*escaped_source);
regexp->set_flags(Smi::FromInt(flags));
@@ -6701,8 +6638,8 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
} else { // !FLAG_thin_strings
if (string->IsConsString()) {
Handle<ConsString> cons = Handle<ConsString>::cast(string);
- cons->set_first(isolate, *result);
- cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
+ cons->set_first(*result);
+ cons->set_second(ReadOnlyRoots(isolate).empty_string());
} else if (string->IsSlicedString()) {
STATIC_ASSERT(static_cast<int>(ConsString::kSize) ==
static_cast<int>(SlicedString::kSize));
@@ -6713,8 +6650,8 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
: isolate->factory()->cons_string_map();
string->set_map(*map);
Handle<ConsString> cons = Handle<ConsString>::cast(string);
- cons->set_first(isolate, *result);
- cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
+ cons->set_first(*result);
+ cons->set_second(ReadOnlyRoots(isolate).empty_string());
}
}
return result;
@@ -7925,9 +7862,13 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
// static
void PropertyCell::SetValueWithInvalidation(Isolate* isolate,
+ const char* cell_name,
Handle<PropertyCell> cell,
Handle<Object> new_value) {
if (cell->value() != *new_value) {
+ if (FLAG_trace_protector_invalidation) {
+ isolate->TraceProtectorInvalidation(cell_name);
+ }
cell->set_value(*new_value);
cell->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
@@ -8127,7 +8068,9 @@ HashTable<NameDictionary, NameDictionaryShape>::Shrink(Isolate* isolate,
int additionalCapacity);
void JSFinalizationGroup::Cleanup(
- Handle<JSFinalizationGroup> finalization_group, Isolate* isolate) {
+ Isolate* isolate, Handle<JSFinalizationGroup> finalization_group,
+ Handle<Object> cleanup) {
+ DCHECK(cleanup->IsCallable());
// It's possible that the cleared_cells list is empty, since
// FinalizationGroup.unregister() removed all its elements before this task
// ran. In that case, don't call the cleanup function.
@@ -8145,7 +8088,6 @@ void JSFinalizationGroup::Cleanup(
Handle<AllocationSite>::null()));
iterator->set_finalization_group(*finalization_group);
}
- Handle<Object> cleanup(finalization_group->cleanup(), isolate);
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
v8::Local<v8::Value> result;
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index 857f3ed0f6..d706b2dfb7 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -15,10 +15,11 @@
#include "src/base/build_config.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
+#include "src/base/memory.h"
#include "src/codegen/constants-arch.h"
#include "src/common/assert-scope.h"
#include "src/common/checks.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/flags/flags.h"
#include "src/objects/elements-kind.h"
#include "src/objects/field-index.h"
@@ -49,22 +50,21 @@
// - JSCollection
// - JSSet
// - JSMap
-// - JSStringIterator
-// - JSSetIterator
-// - JSMapIterator
-// - JSWeakCollection
-// - JSWeakMap
-// - JSWeakSet
-// - JSRegExp
+// - JSDate
// - JSFunction
// - JSGeneratorObject
// - JSGlobalObject
// - JSGlobalProxy
-// - JSValue
-// - JSDate
+// - JSMapIterator
// - JSMessageObject
// - JSModuleNamespace
-// - JSV8BreakIterator // If V8_INTL_SUPPORT enabled.
+// - JSPrimitiveWrapper
+// - JSRegExp
+// - JSSetIterator
+// - JSStringIterator
+// - JSWeakCollection
+// - JSWeakMap
+// - JSWeakSet
// - JSCollator // If V8_INTL_SUPPORT enabled.
// - JSDateTimeFormat // If V8_INTL_SUPPORT enabled.
// - JSListFormat // If V8_INTL_SUPPORT enabled.
@@ -72,8 +72,9 @@
// - JSNumberFormat // If V8_INTL_SUPPORT enabled.
// - JSPluralRules // If V8_INTL_SUPPORT enabled.
// - JSRelativeTimeFormat // If V8_INTL_SUPPORT enabled.
-// - JSSegmentIterator // If V8_INTL_SUPPORT enabled.
// - JSSegmenter // If V8_INTL_SUPPORT enabled.
+// - JSSegmentIterator // If V8_INTL_SUPPORT enabled.
+// - JSV8BreakIterator // If V8_INTL_SUPPORT enabled.
// - WasmExceptionObject
// - WasmGlobalObject
// - WasmInstanceObject
@@ -99,7 +100,7 @@
// - TemplateList
// - TransitionArray
// - ScopeInfo
-// - ModuleInfo
+// - SourceTextModuleInfo
// - ScriptContextTable
// - ClosureFeedbackCellArray
// - FixedDoubleArray
@@ -170,7 +171,9 @@
// - PromiseRejectReactionJobTask
// - PromiseResolveThenableJobTask
// - Module
-// - ModuleInfoEntry
+// - SourceTextModule
+// - SyntheticModule
+// - SourceTextModuleInfoEntry
// - FeedbackCell
// - FeedbackVector
// - PreparseData
@@ -265,9 +268,13 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
constexpr Object() : TaggedImpl(kNullAddress) {}
explicit constexpr Object(Address ptr) : TaggedImpl(ptr) {}
-#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
+#define IS_TYPE_FUNCTION_DECL(Type) \
+ V8_INLINE bool Is##Type() const; \
+ V8_INLINE bool Is##Type(Isolate* isolate) const;
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+ IS_TYPE_FUNCTION_DECL(HashTableBase)
+ IS_TYPE_FUNCTION_DECL(SmallOrderedHashTable)
#undef IS_TYPE_FUNCTION_DECL
// Oddball checks are faster when they are raw pointer comparisons, so the
@@ -277,18 +284,17 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
V8_INLINE bool Is##Type() const;
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
+ IS_TYPE_FUNCTION_DECL(NullOrUndefined, /* unused */)
#undef IS_TYPE_FUNCTION_DECL
- V8_INLINE bool IsNullOrUndefined(Isolate* isolate) const;
- V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
- V8_INLINE bool IsNullOrUndefined() const;
-
V8_INLINE bool IsZero() const;
V8_INLINE bool IsNoSharedNameSentinel() const;
enum class Conversion { kToNumber, kToNumeric };
-#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
+#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
+ V8_INLINE bool Is##Name() const; \
+ V8_INLINE bool Is##Name(Isolate* isolate) const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
@@ -296,9 +302,6 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
V8_INLINE
V8_WARN_UNUSED_RESULT static Maybe<bool> IsArray(Handle<Object> object);
- V8_INLINE bool IsHashTableBase() const;
- V8_INLINE bool IsSmallOrderedHashTable() const;
-
// Extract the number.
inline double Number() const;
V8_INLINE bool IsNaN() const;
@@ -306,9 +309,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
V8_EXPORT_PRIVATE bool ToInt32(int32_t* value);
inline bool ToUint32(uint32_t* value) const;
- inline Representation OptimalRepresentation();
+ inline Representation OptimalRepresentation(Isolate* isolate) const;
- inline ElementsKind OptimalElementsKind();
+ inline ElementsKind OptimalElementsKind(Isolate* isolate) const;
inline bool FitsRepresentation(Representation representation);
@@ -624,9 +627,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
#endif
if (std::is_same<T, double>::value || v8_pointer_compression_unaligned) {
// Bug(v8:8875) Double fields may be unaligned.
- return ReadUnalignedValue<T>(field_address(offset));
+ return base::ReadUnalignedValue<T>(field_address(offset));
} else {
- return Memory<T>(field_address(offset));
+ return base::Memory<T>(field_address(offset));
}
}
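
Illustrative aside (not part of the patch): ReadField/WriteField now route through base::ReadUnalignedValue / base::WriteUnalignedValue because double fields may be unaligned (v8:8875). A simplified standalone sketch of such helpers, going through memcpy so the compiler never assumes alignment (signatures are approximate, not the real base/memory.h):

#include <cstdint>
#include <cstring>
#include <iostream>

using Address = std::uintptr_t;

template <typename T>
T ReadUnalignedValue(Address addr) {
  T value;
  std::memcpy(&value, reinterpret_cast<const void*>(addr), sizeof(T));
  return value;
}

template <typename T>
void WriteUnalignedValue(Address addr, T value) {
  std::memcpy(reinterpret_cast<void*>(addr), &value, sizeof(T));
}

int main() {
  alignas(8) unsigned char buffer[16] = {};
  Address unaligned = reinterpret_cast<Address>(buffer) + 1;  // deliberately misaligned
  WriteUnalignedValue<std::uint64_t>(unaligned, 0x1234567890abcdefULL);
  std::cout << std::hex << ReadUnalignedValue<std::uint64_t>(unaligned) << "\n";
}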
@@ -641,9 +644,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
#endif
if (std::is_same<T, double>::value || v8_pointer_compression_unaligned) {
// Bug(v8:8875) Double fields may be unaligned.
- WriteUnalignedValue<T>(field_address(offset), value);
+ base::WriteUnalignedValue<T>(field_address(offset), value);
} else {
- Memory<T>(field_address(offset)) = value;
+ base::Memory<T>(field_address(offset)) = value;
}
}
@@ -743,13 +746,13 @@ class MapWord {
// View this map word as a forwarding address.
inline HeapObject ToForwardingAddress();
- static inline MapWord FromRawValue(uintptr_t value) { return MapWord(value); }
-
- inline uintptr_t ToRawValue() { return value_; }
+ inline Address ptr() { return value_; }
private:
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
+ template <typename TFieldType, int kFieldOffset>
+ friend class TaggedField;
explicit MapWord(Address value) : value_(value) {}
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index e0d77b9043..bcca03ddca 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -22,7 +22,7 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(Oddball)
void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
// Bug(v8:8875): HeapNumber's double may be unaligned.
- WriteUnalignedValue<uint64_t>(field_address(kToNumberRawOffset), bits);
+ base::WriteUnalignedValue<uint64_t>(field_address(kToNumberRawOffset), bits);
}
byte Oddball::kind() const {
@@ -38,8 +38,8 @@ Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
return Handle<Object>(input->to_number(), isolate);
}
-bool HeapObject::IsBoolean() const {
- return IsOddball() &&
+DEF_GETTER(HeapObject, IsBoolean, bool) {
+ return IsOddball(isolate) &&
((Oddball::cast(*this).kind() & Oddball::kNotBooleanMask) == 0);
}
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 0eaa7567e2..a2270b0a4a 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -54,7 +54,7 @@ template <class Derived>
Object SmallOrderedHashTable<Derived>::KeyAt(int entry) const {
DCHECK_LT(entry, Capacity());
Offset entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
- return READ_FIELD(*this, entry_offset);
+ return TaggedField<Object>::load(*this, entry_offset);
}
template <class Derived>
@@ -63,7 +63,7 @@ Object SmallOrderedHashTable<Derived>::GetDataEntry(int entry,
DCHECK_LT(entry, Capacity());
DCHECK_LE(static_cast<unsigned>(relative_index), Derived::kEntrySize);
Offset entry_offset = GetDataEntryOffset(entry, relative_index);
- return READ_FIELD(*this, entry_offset);
+ return TaggedField<Object>::load(*this, entry_offset);
}
OBJECT_CONSTRUCTORS_IMPL(SmallOrderedHashSet,
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index c4e64d2d6a..463d0e0384 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -508,6 +508,8 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
SetNumberOfBuckets(num_buckets);
SetNumberOfElements(0);
SetNumberOfDeletedElements(0);
+ memset(reinterpret_cast<void*>(field_address(PaddingOffset())), 0,
+ PaddingSize());
Address hashtable_start = GetHashTableStartAddress(capacity);
memset(reinterpret_cast<byte*>(hashtable_start), kNotFound,
@@ -930,7 +932,6 @@ OrderedHashTableHandler<SmallOrderedNameDictionary,
OrderedNameDictionary>::Allocate(Isolate* isolate,
int capacity);
-#if !defined(V8_OS_WIN)
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
Handle<HeapObject> table, Handle<Object> key) {
@@ -943,9 +944,7 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
// down to a smaller hash table.
return LargeTable::Delete(Handle<LargeTable>::cast(table), key);
}
-#endif
-#if !defined(V8_OS_WIN)
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
Isolate* isolate, Handle<HeapObject> table, Handle<Object> key) {
@@ -956,7 +955,6 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
DCHECK(LargeTable::Is(table));
return LargeTable::HasKey(isolate, LargeTable::cast(*table), *key);
}
-#endif
template bool
OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::HasKey(
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index a83109ed90..66dc36e81f 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -527,8 +527,16 @@ class SmallOrderedHashTable : public HeapObject {
return NumberOfDeletedElementsOffset() + kOneByteSize;
}
+ static constexpr Offset PaddingOffset() {
+ return NumberOfBucketsOffset() + kOneByteSize;
+ }
+
+ static constexpr size_t PaddingSize() {
+ return RoundUp<kTaggedSize>(PaddingOffset()) - PaddingOffset();
+ }
+
static constexpr Offset DataTableStartOffset() {
- return RoundUp<kTaggedSize>(NumberOfBucketsOffset());
+ return PaddingOffset() + PaddingSize();
}
static constexpr int DataTableSizeFor(int capacity) {
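
Illustrative aside (not part of the patch): the new PaddingOffset/PaddingSize pair makes the zero-fill in Initialize possible while keeping DataTableStartOffset tagged-size aligned. A worked example with made-up offsets (only the rounding formula matches the hunk):

#include <cstddef>
#include <iostream>

constexpr std::size_t RoundUp(std::size_t x, std::size_t multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

int main() {
  constexpr std::size_t kTaggedSize = 8;      // assumed tagged pointer size
  constexpr std::size_t kPaddingOffset = 13;  // hypothetical end of the header fields
  constexpr std::size_t kPaddingSize =
      RoundUp(kPaddingOffset, kTaggedSize) - kPaddingOffset;              // 3
  constexpr std::size_t kDataTableStartOffset = kPaddingOffset + kPaddingSize;  // 16
  static_assert(kDataTableStartOffset % kTaggedSize == 0, "data table stays aligned");
  std::cout << kPaddingSize << " " << kDataTableStartOffset << "\n";  // 3 16
}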
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index f23e63e50d..e9365c03a4 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -21,10 +21,19 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(PropertyArray, HeapObject)
CAST_ACCESSOR(PropertyArray)
+SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
+SYNCHRONIZED_SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
+
Object PropertyArray::get(int index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return get(isolate, index);
+}
+
+Object PropertyArray::get(Isolate* isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index),
static_cast<unsigned>(this->length()));
- return RELAXED_READ_FIELD(*this, OffsetOfElementAt(index));
+ return TaggedField<Object>::Relaxed_Load(isolate, *this,
+ OffsetOfElementAt(index));
}
void PropertyArray::set(int index, Object value) {
@@ -47,34 +56,24 @@ void PropertyArray::set(int index, Object value, WriteBarrierMode mode) {
ObjectSlot PropertyArray::data_start() { return RawField(kHeaderSize); }
int PropertyArray::length() const {
- Object value_obj = READ_FIELD(*this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
- return LengthField::decode(value);
+ return LengthField::decode(length_and_hash());
}
void PropertyArray::initialize_length(int len) {
- DCHECK_LT(static_cast<unsigned>(len),
- static_cast<unsigned>(LengthField::kMax));
- WRITE_FIELD(*this, kLengthAndHashOffset, Smi::FromInt(len));
+ DCHECK(LengthField::is_valid(len));
+ set_length_and_hash(len);
}
int PropertyArray::synchronized_length() const {
- Object value_obj = ACQUIRE_READ_FIELD(*this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
- return LengthField::decode(value);
+ return LengthField::decode(synchronized_length_and_hash());
}
-int PropertyArray::Hash() const {
- Object value_obj = READ_FIELD(*this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
- return HashField::decode(value);
-}
+int PropertyArray::Hash() const { return HashField::decode(length_and_hash()); }
void PropertyArray::SetHash(int hash) {
- Object value_obj = READ_FIELD(*this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
+ int value = length_and_hash();
value = HashField::update(value, hash);
- WRITE_FIELD(*this, kLengthAndHashOffset, Smi::FromInt(value));
+ set_length_and_hash(value);
}
void PropertyArray::CopyElements(Isolate* isolate, int dst_index,
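
Illustrative aside (not part of the patch): the PropertyArray hunks above replace hand-rolled Smi reads with a single length_and_hash field decoded through LengthField/HashField. A standalone sketch of that bit-field packing with made-up widths (the real layout is defined elsewhere in PropertyArray):

#include <cstdint>
#include <iostream>

// Simplified stand-ins for V8's BitField helpers; the widths here are invented.
struct LengthField {
  static constexpr std::uint32_t kShift = 0, kBits = 10;
  static constexpr std::uint32_t kMask = ((1u << kBits) - 1) << kShift;
  static std::uint32_t decode(std::uint32_t v) { return (v & kMask) >> kShift; }
  static std::uint32_t update(std::uint32_t v, std::uint32_t x) {  // assumes x fits
    return (v & ~kMask) | (x << kShift);
  }
};

struct HashField {
  static constexpr std::uint32_t kShift = 10, kBits = 21;
  static constexpr std::uint32_t kMask = ((1u << kBits) - 1) << kShift;
  static std::uint32_t decode(std::uint32_t v) { return (v & kMask) >> kShift; }
  static std::uint32_t update(std::uint32_t v, std::uint32_t x) {  // assumes x fits
    return (v & ~kMask) | (x << kShift);
  }
};

int main() {
  std::uint32_t length_and_hash = 0;
  length_and_hash = LengthField::update(length_and_hash, 7);    // initialize_length(7)
  length_and_hash = HashField::update(length_and_hash, 12345);  // SetHash(12345)
  std::cout << LengthField::decode(length_and_hash) << " "
            << HashField::decode(length_and_hash) << "\n";      // 7 12345
}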
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index 0c8b40ece2..5c71330280 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -30,6 +30,7 @@ class PropertyArray : public HeapObject {
inline int Hash() const;
inline Object get(int index) const;
+ inline Object get(Isolate* isolate, int index) const;
inline void set(int index, Object value);
// Setter with explicit barrier mode.
@@ -67,6 +68,11 @@ class PropertyArray : public HeapObject {
static const int kNoHashSentinel = 0;
+ private:
+ DECL_INT_ACCESSORS(length_and_hash)
+
+ DECL_SYNCHRONIZED_INT_ACCESSORS(length_and_hash)
+
OBJECT_CONSTRUCTORS(PropertyArray, HeapObject);
};
diff --git a/deps/v8/src/objects/property-cell.h b/deps/v8/src/objects/property-cell.h
index 75a5132728..b336986f62 100644
--- a/deps/v8/src/objects/property-cell.h
+++ b/deps/v8/src/objects/property-cell.h
@@ -47,7 +47,7 @@ class PropertyCell : public HeapObject {
static Handle<PropertyCell> InvalidateEntry(
Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry);
- static void SetValueWithInvalidation(Isolate* isolate,
+ static void SetValueWithInvalidation(Isolate* isolate, const char* cell_name,
Handle<PropertyCell> cell,
Handle<Object> new_value);
diff --git a/deps/v8/src/objects/property.cc b/deps/v8/src/objects/property.cc
index c226c28a76..fba6fe3405 100644
--- a/deps/v8/src/objects/property.cc
+++ b/deps/v8/src/objects/property.cc
@@ -75,9 +75,10 @@ Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes) {
+ Isolate* isolate = GetIsolateForPtrCompr(*key);
return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
kDescriptor, PropertyConstness::kConst,
- value->OptimalRepresentation(), 0);
+ value->OptimalRepresentation(isolate), 0);
}
Descriptor Descriptor::DataConstant(Isolate* isolate, Handle<Name> key,
diff --git a/deps/v8/src/objects/prototype-inl.h b/deps/v8/src/objects/prototype-inl.h
index 5f7c3e23c5..2836186b12 100644
--- a/deps/v8/src/objects/prototype-inl.h
+++ b/deps/v8/src/objects/prototype-inl.h
@@ -48,7 +48,7 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate, Map receiver_map,
if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
DCHECK(object_.IsJSReceiver());
Map map = JSReceiver::cast(object_).map();
- is_at_end_ = !map.has_hidden_prototype();
+ is_at_end_ = !map.IsJSGlobalProxyMap();
}
}
@@ -63,7 +63,7 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
DCHECK(handle_->IsJSReceiver());
Map map = JSReceiver::cast(*handle_).map();
- is_at_end_ = !map.has_hidden_prototype();
+ is_at_end_ = !map.IsJSGlobalProxyMap();
}
}
@@ -96,8 +96,9 @@ void PrototypeIterator::AdvanceIgnoringProxies() {
Map map = HeapObject::cast(object).map();
HeapObject prototype = map.prototype();
- is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN ? !map.has_hidden_prototype()
- : prototype.IsNull(isolate_);
+ is_at_end_ =
+ prototype.IsNull(isolate_) ||
+ (where_to_end_ == END_AT_NON_HIDDEN && !map.IsJSGlobalProxyMap());
if (handle_.is_null()) {
object_ = prototype;
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index af45e86af3..eca8bc1ecd 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -45,8 +45,9 @@ bool ScopeInfo::Equals(ScopeInfo other) const {
if (!ScopeInfo::cast(entry).Equals(ScopeInfo::cast(other_entry))) {
return false;
}
- } else if (entry.IsModuleInfo()) {
- if (!ModuleInfo::cast(entry).Equals(ModuleInfo::cast(other_entry))) {
+ } else if (entry.IsSourceTextModuleInfo()) {
+ if (!SourceTextModuleInfo::cast(entry).Equals(
+ SourceTextModuleInfo::cast(other_entry))) {
return false;
}
} else {
@@ -217,6 +218,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
uint32_t info =
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
+ RequiresBrandCheckField::encode(
+ var->get_requires_brand_check_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info.set(context_local_base + local_index, *var->name(), mode);
@@ -233,6 +236,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
+ RequiresBrandCheckField::encode(
+ var->get_requires_brand_check_flag()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info.set(module_var_entry + kModuleVariablePropertiesOffset,
Smi::FromInt(properties));
@@ -271,6 +276,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
+ RequiresBrandCheckField::encode(
+ var->get_requires_brand_check_flag()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info.set(context_local_base + local_index, *var->name(), mode);
scope_info.set(context_local_info_base + local_index,
@@ -327,8 +334,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
// Module-specific information (only for module scopes).
if (scope->is_module_scope()) {
- Handle<ModuleInfo> module_info =
- ModuleInfo::New(isolate, zone, scope->AsModuleScope()->module());
+ Handle<SourceTextModuleInfo> module_info = SourceTextModuleInfo::New(
+ isolate, zone, scope->AsModuleScope()->module());
DCHECK_EQ(index, scope_info_handle->ModuleInfoIndex());
scope_info_handle->set(index++, *module_info);
DCHECK_EQ(index, scope_info_handle->ModuleVariableCountIndex());
@@ -444,6 +451,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
VariableModeField::encode(VariableMode::kConst) |
InitFlagField::encode(kCreatedInitialized) |
MaybeAssignedFlagField::encode(kNotAssigned) |
+ RequiresBrandCheckField::encode(kNoBrandCheck) |
ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info->set(index++, Smi::FromInt(value));
}
@@ -649,9 +657,9 @@ ScopeInfo ScopeInfo::OuterScopeInfo() const {
return ScopeInfo::cast(get(OuterScopeInfoIndex()));
}
-ModuleInfo ScopeInfo::ModuleDescriptorInfo() const {
+SourceTextModuleInfo ScopeInfo::ModuleDescriptorInfo() const {
DCHECK(scope_type() == MODULE_SCOPE);
- return ModuleInfo::cast(get(ModuleInfoIndex()));
+ return SourceTextModuleInfo::cast(get(ModuleInfoIndex()));
}
String ScopeInfo::ContextLocalName(int var) const {
@@ -700,6 +708,14 @@ MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) const {
return MaybeAssignedFlagField::decode(value);
}
+RequiresBrandCheckFlag ScopeInfo::RequiresBrandCheck(int var) const {
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, ContextLocalCount());
+ int info_index = ContextLocalInfosIndex() + var;
+ int value = Smi::ToInt(get(info_index));
+ return RequiresBrandCheckField::decode(value);
+}
+
// static
bool ScopeInfo::VariableIsSynthetic(String name) {
// There's currently no flag stored on the ScopeInfo to indicate that a
@@ -739,7 +755,8 @@ int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
VariableMode* mode,
InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag) {
+ MaybeAssignedFlag* maybe_assigned_flag,
+ RequiresBrandCheckFlag* requires_brand_check) {
DisallowHeapAllocation no_gc;
DCHECK(name.IsInternalizedString());
DCHECK_NOT_NULL(mode);
@@ -756,6 +773,7 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
*mode = scope_info.ContextLocalMode(var);
*init_flag = scope_info.ContextLocalInitFlag(var);
*maybe_assigned_flag = scope_info.ContextLocalMaybeAssignedFlag(var);
+ *requires_brand_check = scope_info.RequiresBrandCheck(var);
int result = Context::MIN_CONTEXT_SLOTS + var;
DCHECK_LT(result, scope_info.ContextLength());
@@ -873,15 +891,13 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
-Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
- Handle<Object> export_name,
- Handle<Object> local_name,
- Handle<Object> import_name,
- int module_request, int cell_index,
- int beg_pos, int end_pos) {
- Handle<ModuleInfoEntry> result =
- Handle<ModuleInfoEntry>::cast(isolate->factory()->NewStruct(
- MODULE_INFO_ENTRY_TYPE, AllocationType::kOld));
+Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
+ Isolate* isolate, Handle<Object> export_name, Handle<Object> local_name,
+ Handle<Object> import_name, int module_request, int cell_index, int beg_pos,
+ int end_pos) {
+ Handle<SourceTextModuleInfoEntry> result =
+ Handle<SourceTextModuleInfoEntry>::cast(isolate->factory()->NewStruct(
+ SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, AllocationType::kOld));
result->set_export_name(*export_name);
result->set_local_name(*local_name);
result->set_import_name(*import_name);
@@ -892,8 +908,8 @@ Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
return result;
}
-Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
- ModuleDescriptor* descr) {
+Handle<SourceTextModuleInfo> SourceTextModuleInfo::New(
+ Isolate* isolate, Zone* zone, SourceTextModuleDescriptor* descr) {
// Serialize module requests.
int size = static_cast<int>(descr->module_requests().size());
Handle<FixedArray> module_requests = isolate->factory()->NewFixedArray(size);
@@ -911,7 +927,8 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
{
int i = 0;
for (auto entry : descr->special_exports()) {
- Handle<ModuleInfoEntry> serialized_entry = entry->Serialize(isolate);
+ Handle<SourceTextModuleInfoEntry> serialized_entry =
+ entry->Serialize(isolate);
special_exports->set(i++, *serialized_entry);
}
}
@@ -922,7 +939,8 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
{
int i = 0;
for (auto entry : descr->namespace_imports()) {
- Handle<ModuleInfoEntry> serialized_entry = entry->Serialize(isolate);
+ Handle<SourceTextModuleInfoEntry> serialized_entry =
+ entry->Serialize(isolate);
namespace_imports->set(i++, *serialized_entry);
}
}
@@ -937,13 +955,14 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
{
int i = 0;
for (const auto& elem : descr->regular_imports()) {
- Handle<ModuleInfoEntry> serialized_entry =
+ Handle<SourceTextModuleInfoEntry> serialized_entry =
elem.second->Serialize(isolate);
regular_imports->set(i++, *serialized_entry);
}
}
- Handle<ModuleInfo> result = isolate->factory()->NewModuleInfo();
+ Handle<SourceTextModuleInfo> result =
+ isolate->factory()->NewSourceTextModuleInfo();
result->set(kModuleRequestsIndex, *module_requests);
result->set(kSpecialExportsIndex, *special_exports);
result->set(kRegularExportsIndex, *regular_exports);
@@ -953,22 +972,22 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
return result;
}
-int ModuleInfo::RegularExportCount() const {
+int SourceTextModuleInfo::RegularExportCount() const {
DCHECK_EQ(regular_exports().length() % kRegularExportLength, 0);
return regular_exports().length() / kRegularExportLength;
}
-String ModuleInfo::RegularExportLocalName(int i) const {
+String SourceTextModuleInfo::RegularExportLocalName(int i) const {
return String::cast(regular_exports().get(i * kRegularExportLength +
kRegularExportLocalNameOffset));
}
-int ModuleInfo::RegularExportCellIndex(int i) const {
+int SourceTextModuleInfo::RegularExportCellIndex(int i) const {
return Smi::ToInt(regular_exports().get(i * kRegularExportLength +
kRegularExportCellIndexOffset));
}
-FixedArray ModuleInfo::RegularExportExportNames(int i) const {
+FixedArray SourceTextModuleInfo::RegularExportExportNames(int i) const {
return FixedArray::cast(regular_exports().get(
i * kRegularExportLength + kRegularExportExportNamesOffset));
}
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 8d43357631..0b8eb61b00 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -22,7 +22,7 @@ class Handle;
class Isolate;
template <typename T>
class MaybeHandle;
-class ModuleInfo;
+class SourceTextModuleInfo;
class Scope;
class Zone;
@@ -113,7 +113,7 @@ class ScopeInfo : public FixedArray {
int EndPosition() const;
void SetPositionInfo(int start, int end);
- ModuleInfo ModuleDescriptorInfo() const;
+ SourceTextModuleInfo ModuleDescriptorInfo() const;
// Return the name of the given context local.
String ContextLocalName(int var) const;
@@ -130,6 +130,9 @@ class ScopeInfo : public FixedArray {
// Return the initialization flag of the given context local.
MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var) const;
+  // Return whether access to the variable requires a brand check.

+ RequiresBrandCheckFlag RequiresBrandCheck(int var) const;
+
// Return true if this local was introduced by the compiler, and should not be
// exposed to the user in a debugger.
static bool VariableIsSynthetic(String name);
@@ -141,7 +144,8 @@ class ScopeInfo : public FixedArray {
// mode for that variable.
static int ContextSlotIndex(ScopeInfo scope_info, String name,
VariableMode* mode, InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag);
+ MaybeAssignedFlag* maybe_assigned_flag,
+ RequiresBrandCheckFlag* requires_brand_check);
// Lookup metadata of a MODULE-allocated variable. Return 0 if there is no
// module variable with the given name (the index value of a MODULE variable
@@ -284,10 +288,10 @@ class ScopeInfo : public FixedArray {
// the scope belongs to a function or script.
// 7. OuterScopeInfoIndex:
// The outer scope's ScopeInfo or the hole if there's none.
- // 8. ModuleInfo, ModuleVariableCount, and ModuleVariables:
- // For a module scope, this part contains the ModuleInfo, the number of
- // MODULE-allocated variables, and the metadata of those variables. For
- // non-module scopes it is empty.
+ // 8. SourceTextModuleInfo, ModuleVariableCount, and ModuleVariables:
+ // For a module scope, this part contains the SourceTextModuleInfo, the
+ // number of MODULE-allocated variables, and the metadata of those
+ // variables. For non-module scopes it is empty.
int ContextLocalNamesIndex() const;
int ContextLocalInfosIndex() const;
int ReceiverInfoIndex() const;
@@ -322,8 +326,11 @@ class ScopeInfo : public FixedArray {
class VariableModeField : public BitField<VariableMode, 0, 3> {};
class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
class MaybeAssignedFlagField : public BitField<MaybeAssignedFlag, 4, 1> {};
+ class RequiresBrandCheckField
+ : public BitField<RequiresBrandCheckFlag, MaybeAssignedFlagField::kNext,
+ 1> {};
class ParameterNumberField
- : public BitField<uint32_t, MaybeAssignedFlagField::kNext, 16> {};
+ : public BitField<uint32_t, RequiresBrandCheckField::kNext, 16> {};
friend class ScopeIterator;
friend std::ostream& operator<<(std::ostream& os,
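
The new RequiresBrandCheckField above is spliced into the per-variable bit field right after MaybeAssignedFlagField, which is why ParameterNumberField now starts at RequiresBrandCheckField::kNext (bit 6 instead of bit 5). A rough standalone re-implementation of that packing; the enum values are placeholders for the real VariableMode etc.:

#include <cassert>
#include <cstdint>

// Simplified stand-in for v8::internal::BitField<T, shift, size>.
template <typename T, int shift, int size>
struct BitField {
  static constexpr int kNext = shift + size;
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

// Placeholder enums standing in for the real scope-info flag types.
enum class VariableMode : uint32_t { kLet = 1 };
enum class InitializationFlag : uint32_t { kNeedsInitialization = 1 };
enum class MaybeAssignedFlag : uint32_t { kMaybeAssigned = 1 };
enum class RequiresBrandCheckFlag : uint32_t { kNoBrandCheck = 0, kRequiresBrandCheck = 1 };

using VariableModeField = BitField<VariableMode, 0, 3>;
using InitFlagField = BitField<InitializationFlag, VariableModeField::kNext, 1>;
using MaybeAssignedFlagField = BitField<MaybeAssignedFlag, InitFlagField::kNext, 1>;
using RequiresBrandCheckField =
    BitField<RequiresBrandCheckFlag, MaybeAssignedFlagField::kNext, 1>;
using ParameterNumberField = BitField<uint32_t, RequiresBrandCheckField::kNext, 16>;

int main() {
  static_assert(RequiresBrandCheckField::kNext == 6,
                "the parameter number now starts at bit 6");
  uint32_t packed =
      VariableModeField::encode(VariableMode::kLet) |
      RequiresBrandCheckField::encode(RequiresBrandCheckFlag::kRequiresBrandCheck) |
      ParameterNumberField::encode(7);
  assert(ParameterNumberField::decode(packed) == 7);
  assert(RequiresBrandCheckField::decode(packed) ==
         RequiresBrandCheckFlag::kRequiresBrandCheck);
  return 0;
}
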
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index f5413ce1de..9778db5d90 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -91,7 +91,6 @@ CAST_ACCESSOR(UncompiledData)
ACCESSORS(UncompiledData, inferred_name, String, kInferredNameOffset)
INT32_ACCESSORS(UncompiledData, start_position, kStartPositionOffset)
INT32_ACCESSORS(UncompiledData, end_position, kEndPositionOffset)
-INT32_ACCESSORS(UncompiledData, function_literal_id, kFunctionLiteralIdOffset)
void UncompiledData::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) == 0) return;
@@ -106,9 +105,9 @@ CAST_ACCESSOR(UncompiledDataWithPreparseData)
ACCESSORS(UncompiledDataWithPreparseData, preparse_data, PreparseData,
kPreparseDataOffset)
-bool HeapObject::IsUncompiledData() const {
- return IsUncompiledDataWithoutPreparseData() ||
- IsUncompiledDataWithPreparseData();
+DEF_GETTER(HeapObject, IsUncompiledData, bool) {
+ return IsUncompiledDataWithoutPreparseData(isolate) ||
+ IsUncompiledDataWithPreparseData(isolate);
}
OBJECT_CONSTRUCTORS_IMPL(InterpreterData, Struct)
@@ -128,6 +127,9 @@ ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
ACCESSORS(SharedFunctionInfo, script_or_debug_info, Object,
kScriptOrDebugInfoOffset)
+INT32_ACCESSORS(SharedFunctionInfo, function_literal_id,
+ kFunctionLiteralIdOffset)
+
#if V8_SFI_HAS_UNIQUE_ID
INT_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
#endif
@@ -629,7 +631,7 @@ void SharedFunctionInfo::ClearPreparseData() {
// static
void UncompiledData::Initialize(
UncompiledData data, String inferred_name, int start_position,
- int end_position, int function_literal_id,
+ int end_position,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot) {
data.set_inferred_name(inferred_name);
@@ -637,28 +639,22 @@ void UncompiledData::Initialize(
data, data.RawField(UncompiledData::kInferredNameOffset), inferred_name);
data.set_start_position(start_position);
data.set_end_position(end_position);
- data.set_function_literal_id(function_literal_id);
data.clear_padding();
}
void UncompiledDataWithPreparseData::Initialize(
UncompiledDataWithPreparseData data, String inferred_name,
- int start_position, int end_position, int function_literal_id,
- PreparseData scope_data,
+ int start_position, int end_position, PreparseData scope_data,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot) {
UncompiledData::Initialize(data, inferred_name, start_position, end_position,
- function_literal_id, gc_notify_updated_slot);
+ gc_notify_updated_slot);
data.set_preparse_data(scope_data);
gc_notify_updated_slot(
data, data.RawField(UncompiledDataWithPreparseData::kPreparseDataOffset),
scope_data);
}
-bool UncompiledData::has_function_literal_id() {
- return function_literal_id() != kFunctionLiteralIdInvalid;
-}
-
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
return function_data().IsWasmExportedFunctionData();
}
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index a3b84ee46e..f7a82964b1 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -104,16 +104,12 @@ class UncompiledData : public HeapObject {
DECL_ACCESSORS(inferred_name, String)
DECL_INT32_ACCESSORS(start_position)
DECL_INT32_ACCESSORS(end_position)
- DECL_INT32_ACCESSORS(function_literal_id)
-
- // Returns true if the UncompiledData contains a valid function_literal_id.
- inline bool has_function_literal_id();
DECL_CAST(UncompiledData)
inline static void Initialize(
UncompiledData data, String inferred_name, int start_position,
- int end_position, int function_literal_id,
+ int end_position,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot =
[](HeapObject object, ObjectSlot slot, HeapObject target) {});
@@ -126,7 +122,6 @@ class UncompiledData : public HeapObject {
/* Raw data fields. */ \
V(kStartPositionOffset, kInt32Size) \
V(kEndPositionOffset, kInt32Size) \
- V(kFunctionLiteralIdOffset, kInt32Size) \
V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
/* Header size. */ \
V(kSize, 0)
@@ -172,8 +167,7 @@ class UncompiledDataWithPreparseData : public UncompiledData {
inline static void Initialize(
UncompiledDataWithPreparseData data, String inferred_name,
- int start_position, int end_position, int function_literal_id,
- PreparseData scope_data,
+ int start_position, int end_position, PreparseData scope_data,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot =
[](HeapObject object, ObjectSlot slot, HeapObject target) {});
@@ -316,6 +310,11 @@ class SharedFunctionInfo : public HeapObject {
// function. The value is only reliable when the function has been compiled.
DECL_UINT16_ACCESSORS(expected_nof_properties)
+ // [function_literal_id] - uniquely identifies the FunctionLiteral this
+ // SharedFunctionInfo represents within its script, or -1 if this
+ // SharedFunctionInfo object doesn't correspond to a parsed FunctionLiteral.
+ DECL_INT32_ACCESSORS(function_literal_id)
+
#if V8_SFI_HAS_UNIQUE_ID
// [unique_id] - For --trace-maps purposes, an identifier that's persistent
// even if the GC moves this SharedFunctionInfo.
@@ -385,9 +384,6 @@ class SharedFunctionInfo : public HeapObject {
inline bool HasInferredName();
inline String inferred_name();
- // Get the function literal id associated with this function, for parsing.
- V8_EXPORT_PRIVATE int FunctionLiteralId(Isolate* isolate) const;
-
// Break infos are contained in DebugInfo, this is a convenience method
// to simplify access.
V8_EXPORT_PRIVATE bool HasBreakInfo() const;
@@ -624,7 +620,7 @@ class SharedFunctionInfo : public HeapObject {
// Returns the unique TraceID for this SharedFunctionInfo (within the
// kTraceScope, works only for functions that have a Script and start/end
// position).
- uint64_t TraceID() const;
+ uint64_t TraceID(FunctionLiteral* literal = nullptr) const;
// Returns the unique trace ID reference for this SharedFunctionInfo
// (based on the |TraceID()| above).
@@ -634,16 +630,14 @@ class SharedFunctionInfo : public HeapObject {
class ScriptIterator {
public:
V8_EXPORT_PRIVATE ScriptIterator(Isolate* isolate, Script script);
- ScriptIterator(Isolate* isolate,
- Handle<WeakFixedArray> shared_function_infos);
+ explicit ScriptIterator(Handle<WeakFixedArray> shared_function_infos);
V8_EXPORT_PRIVATE SharedFunctionInfo Next();
int CurrentIndex() const { return index_ - 1; }
// Reset the iterator to run on |script|.
- void Reset(Script script);
+ void Reset(Isolate* isolate, Script script);
private:
- Isolate* isolate_;
Handle<WeakFixedArray> shared_function_infos_;
int index_;
DISALLOW_COPY_AND_ASSIGN(ScriptIterator);
@@ -656,6 +650,7 @@ class SharedFunctionInfo : public HeapObject {
V8_EXPORT_PRIVATE SharedFunctionInfo Next();
private:
+ Isolate* isolate_;
Script::Iterator script_iterator_;
WeakArrayList::Iterator noscript_sfi_iterator_;
SharedFunctionInfo::ScriptIterator sfi_iterator_;
@@ -744,10 +739,6 @@ class SharedFunctionInfo : public HeapObject {
friend class V8HeapExplorer;
FRIEND_TEST(PreParserTest, LazyFunctionLength);
- // Find the index of this function in the parent script. Slow path of
- // FunctionLiteralId.
- int FindIndexInScript(Isolate* isolate) const;
-
OBJECT_CONSTRUCTORS(SharedFunctionInfo, HeapObject);
};
diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
index fa8b558939..85f6525399 100644
--- a/deps/v8/src/objects/slots.h
+++ b/deps/v8/src/objects/slots.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_SLOTS_H_
#define V8_OBJECTS_SLOTS_H_
+#include "src/base/memory.h"
#include "src/common/globals.h"
-#include "src/common/v8memory.h"
namespace v8 {
namespace internal {
@@ -192,11 +192,11 @@ class UnalignedSlot : public SlotBase<UnalignedSlot<T>, T, 1> {
Reference(const Reference&) V8_NOEXCEPT = default;
Reference& operator=(const Reference& other) V8_NOEXCEPT {
- WriteUnalignedValue<T>(address_, other.value());
+ base::WriteUnalignedValue<T>(address_, other.value());
return *this;
}
Reference& operator=(T value) {
- WriteUnalignedValue<T>(address_, value);
+ base::WriteUnalignedValue<T>(address_, value);
return *this;
}
@@ -206,8 +206,8 @@ class UnalignedSlot : public SlotBase<UnalignedSlot<T>, T, 1> {
void swap(Reference& other) {
T tmp = value();
- WriteUnalignedValue<T>(address_, other.value());
- WriteUnalignedValue<T>(other.address_, tmp);
+ base::WriteUnalignedValue<T>(address_, other.value());
+ base::WriteUnalignedValue<T>(other.address_, tmp);
}
bool operator<(const Reference& other) const {
@@ -219,7 +219,7 @@ class UnalignedSlot : public SlotBase<UnalignedSlot<T>, T, 1> {
}
private:
- T value() const { return ReadUnalignedValue<T>(address_); }
+ T value() const { return base::ReadUnalignedValue<T>(address_); }
Address address_;
};
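
UnalignedSlot now routes through base::ReadUnalignedValue / base::WriteUnalignedValue, which this patch moves into src/base/memory.h. Those helpers boil down to memcpy so the compiler never emits a potentially misaligned typed load or store; a hedged sketch of the idea:

#include <cstdint>
#include <cstring>

using Address = uintptr_t;

// Sketch of what base::ReadUnalignedValue / WriteUnalignedValue do: copy the
// bytes instead of dereferencing a possibly misaligned T*.
template <typename T>
T ReadUnalignedValue(Address addr) {
  T result;
  std::memcpy(&result, reinterpret_cast<const void*>(addr), sizeof(T));
  return result;
}

template <typename T>
void WriteUnalignedValue(Address addr, T value) {
  std::memcpy(reinterpret_cast<void*>(addr), &value, sizeof(T));
}

int main() {
  // A buffer whose interior is deliberately not 8-byte aligned.
  alignas(8) unsigned char buffer[16] = {};
  Address unaligned = reinterpret_cast<Address>(buffer) + 1;
  WriteUnalignedValue<uint64_t>(unaligned, 0x1122334455667788ull);
  return ReadUnalignedValue<uint64_t>(unaligned) == 0x1122334455667788ull ? 0 : 1;
}
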
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
new file mode 100644
index 0000000000..e6637415c1
--- /dev/null
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -0,0 +1,661 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/source-text-module.h"
+
+#include "src/api/api-inl.h"
+#include "src/ast/modules.h"
+#include "src/builtins/accessors.h"
+#include "src/objects/js-generator-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/shared-function-info.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+struct StringHandleHash {
+ V8_INLINE size_t operator()(Handle<String> string) const {
+ return string->Hash();
+ }
+};
+
+struct StringHandleEqual {
+ V8_INLINE bool operator()(Handle<String> lhs, Handle<String> rhs) const {
+ return lhs->Equals(*rhs);
+ }
+};
+
+class UnorderedStringSet
+ : public std::unordered_set<Handle<String>, StringHandleHash,
+ StringHandleEqual,
+ ZoneAllocator<Handle<String>>> {
+ public:
+ explicit UnorderedStringSet(Zone* zone)
+ : std::unordered_set<Handle<String>, StringHandleHash, StringHandleEqual,
+ ZoneAllocator<Handle<String>>>(
+ 2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
+ ZoneAllocator<Handle<String>>(zone)) {}
+};
+
+class UnorderedStringMap
+ : public std::unordered_map<
+ Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
+ ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>> {
+ public:
+ explicit UnorderedStringMap(Zone* zone)
+ : std::unordered_map<
+ Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
+ ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>>(
+ 2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
+ ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>(
+ zone)) {}
+};
+
+class Module::ResolveSet
+ : public std::unordered_map<
+ Handle<Module>, UnorderedStringSet*, ModuleHandleHash,
+ ModuleHandleEqual,
+ ZoneAllocator<std::pair<const Handle<Module>, UnorderedStringSet*>>> {
+ public:
+ explicit ResolveSet(Zone* zone)
+ : std::unordered_map<Handle<Module>, UnorderedStringSet*,
+ ModuleHandleHash, ModuleHandleEqual,
+ ZoneAllocator<std::pair<const Handle<Module>,
+ UnorderedStringSet*>>>(
+ 2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
+ ZoneAllocator<std::pair<const Handle<Module>, UnorderedStringSet*>>(
+ zone)),
+ zone_(zone) {}
+
+ Zone* zone() const { return zone_; }
+
+ private:
+ Zone* zone_;
+};
+
+SharedFunctionInfo SourceTextModule::GetSharedFunctionInfo() const {
+ DisallowHeapAllocation no_alloc;
+ DCHECK_NE(status(), Module::kEvaluating);
+ DCHECK_NE(status(), Module::kEvaluated);
+ switch (status()) {
+ case kUninstantiated:
+ case kPreInstantiating:
+ DCHECK(code().IsSharedFunctionInfo());
+ return SharedFunctionInfo::cast(code());
+ case kInstantiating:
+ DCHECK(code().IsJSFunction());
+ return JSFunction::cast(code()).shared();
+ case kInstantiated:
+ DCHECK(code().IsJSGeneratorObject());
+ return JSGeneratorObject::cast(code()).function().shared();
+ case kEvaluating:
+ case kEvaluated:
+ case kErrored:
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+}
+
+int SourceTextModule::ExportIndex(int cell_index) {
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index),
+ SourceTextModuleDescriptor::kExport);
+ return cell_index - 1;
+}
+
+int SourceTextModule::ImportIndex(int cell_index) {
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index),
+ SourceTextModuleDescriptor::kImport);
+ return -cell_index - 1;
+}
+
+void SourceTextModule::CreateIndirectExport(
+ Isolate* isolate, Handle<SourceTextModule> module, Handle<String> name,
+ Handle<SourceTextModuleInfoEntry> entry) {
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ DCHECK(exports->Lookup(name).IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, name, entry);
+ module->set_exports(*exports);
+}
+
+void SourceTextModule::CreateExport(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ int cell_index, Handle<FixedArray> names) {
+ DCHECK_LT(0, names->length());
+ Handle<Cell> cell =
+ isolate->factory()->NewCell(isolate->factory()->undefined_value());
+ module->regular_exports().set(ExportIndex(cell_index), *cell);
+
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ for (int i = 0, n = names->length(); i < n; ++i) {
+ Handle<String> name(String::cast(names->get(i)), isolate);
+ DCHECK(exports->Lookup(name).IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, name, cell);
+ }
+ module->set_exports(*exports);
+}
+
+Cell SourceTextModule::GetCell(int cell_index) {
+ DisallowHeapAllocation no_gc;
+ Object cell;
+ switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
+ case SourceTextModuleDescriptor::kImport:
+ cell = regular_imports().get(ImportIndex(cell_index));
+ break;
+ case SourceTextModuleDescriptor::kExport:
+ cell = regular_exports().get(ExportIndex(cell_index));
+ break;
+ case SourceTextModuleDescriptor::kInvalid:
+ UNREACHABLE();
+ break;
+ }
+ return Cell::cast(cell);
+}
+
+Handle<Object> SourceTextModule::LoadVariable(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ int cell_index) {
+ return handle(module->GetCell(cell_index).value(), isolate);
+}
+
+void SourceTextModule::StoreVariable(Handle<SourceTextModule> module,
+ int cell_index, Handle<Object> value) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index),
+ SourceTextModuleDescriptor::kExport);
+ module->GetCell(cell_index).set_value(*value);
+}
+
+MaybeHandle<Cell> SourceTextModule::ResolveExport(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve, Module::ResolveSet* resolve_set) {
+ Handle<Object> object(module->exports().Lookup(export_name), isolate);
+ if (object->IsCell()) {
+ // Already resolved (e.g. because it's a local export).
+ return Handle<Cell>::cast(object);
+ }
+
+ // Check for cycle before recursing.
+ {
+ // Attempt insertion with a null string set.
+ auto result = resolve_set->insert({module, nullptr});
+ UnorderedStringSet*& name_set = result.first->second;
+ if (result.second) {
+ // |module| wasn't in the map previously, so allocate a new name set.
+ Zone* zone = resolve_set->zone();
+ name_set =
+ new (zone->New(sizeof(UnorderedStringSet))) UnorderedStringSet(zone);
+ } else if (name_set->count(export_name)) {
+ // Cycle detected.
+ if (must_resolve) {
+ return isolate->Throw<Cell>(
+ isolate->factory()->NewSyntaxError(
+ MessageTemplate::kCyclicModuleDependency, export_name,
+ module_specifier),
+ &loc);
+ }
+ return MaybeHandle<Cell>();
+ }
+ name_set->insert(export_name);
+ }
+
+ if (object->IsSourceTextModuleInfoEntry()) {
+ // Not yet resolved indirect export.
+ Handle<SourceTextModuleInfoEntry> entry =
+ Handle<SourceTextModuleInfoEntry>::cast(object);
+ Handle<String> import_name(String::cast(entry->import_name()), isolate);
+ Handle<Script> script(module->script(), isolate);
+ MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
+
+ Handle<Cell> cell;
+ if (!ResolveImport(isolate, module, import_name, entry->module_request(),
+ new_loc, true, resolve_set)
+ .ToHandle(&cell)) {
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Cell>();
+ }
+
+ // The export table may have changed but the entry in question should be
+ // unchanged.
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ DCHECK(exports->Lookup(export_name).IsSourceTextModuleInfoEntry());
+
+ exports = ObjectHashTable::Put(exports, export_name, cell);
+ module->set_exports(*exports);
+ return cell;
+ }
+
+ DCHECK(object->IsTheHole(isolate));
+ return SourceTextModule::ResolveExportUsingStarExports(
+ isolate, module, module_specifier, export_name, loc, must_resolve,
+ resolve_set);
+}
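
The cycle check at the top of ResolveExport relies on the ResolveSet defined earlier in this file: for each visited module it keeps the set of export names whose resolution is currently in flight, and hitting the same (module, name) pair again means the indirect-export chain is cyclic. A toy sketch of that bookkeeping with ordinary standard containers (the file names are made up):

#include <iostream>
#include <map>
#include <set>
#include <string>

// Toy stand-in for Module::ResolveSet: per visited module, the export names
// whose resolution is in progress.
using ResolveSet = std::map<std::string, std::set<std::string>>;

bool EnterResolution(ResolveSet& resolve_set, const std::string& module,
                     const std::string& export_name) {
  auto& names = resolve_set[module];        // insert module with an empty name set
  return names.insert(export_name).second;  // false => cycle detected
}

int main() {
  ResolveSet resolve_set;
  // a.js resolving "x" re-exports from b.js, which re-exports "x" from a.js.
  std::cout << EnterResolution(resolve_set, "a.js", "x") << "\n";  // 1 (ok)
  std::cout << EnterResolution(resolve_set, "b.js", "x") << "\n";  // 1 (ok)
  std::cout << EnterResolution(resolve_set, "a.js", "x") << "\n";  // 0 (cycle)
}
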
+
+MaybeHandle<Cell> SourceTextModule::ResolveImport(
+ Isolate* isolate, Handle<SourceTextModule> module, Handle<String> name,
+ int module_request, MessageLocation loc, bool must_resolve,
+ Module::ResolveSet* resolve_set) {
+ Handle<Module> requested_module(
+ Module::cast(module->requested_modules().get(module_request)), isolate);
+ Handle<String> specifier(
+ String::cast(module->info().module_requests().get(module_request)),
+ isolate);
+ MaybeHandle<Cell> result =
+ Module::ResolveExport(isolate, requested_module, specifier, name, loc,
+ must_resolve, resolve_set);
+ DCHECK_IMPLIES(isolate->has_pending_exception(), result.is_null());
+ return result;
+}
+
+MaybeHandle<Cell> SourceTextModule::ResolveExportUsingStarExports(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve, Module::ResolveSet* resolve_set) {
+ if (!export_name->Equals(ReadOnlyRoots(isolate).default_string())) {
+ // Go through all star exports looking for the given name. If multiple star
+ // exports provide the name, make sure they all map it to the same cell.
+ Handle<Cell> unique_cell;
+ Handle<FixedArray> special_exports(module->info().special_exports(),
+ isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ i::Handle<i::SourceTextModuleInfoEntry> entry(
+ i::SourceTextModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ if (!entry->export_name().IsUndefined(isolate)) {
+ continue; // Indirect export.
+ }
+
+ Handle<Script> script(module->script(), isolate);
+ MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
+
+ Handle<Cell> cell;
+ if (ResolveImport(isolate, module, export_name, entry->module_request(),
+ new_loc, false, resolve_set)
+ .ToHandle(&cell)) {
+ if (unique_cell.is_null()) unique_cell = cell;
+ if (*unique_cell != *cell) {
+ return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
+ MessageTemplate::kAmbiguousExport,
+ module_specifier, export_name),
+ &loc);
+ }
+ } else if (isolate->has_pending_exception()) {
+ return MaybeHandle<Cell>();
+ }
+ }
+
+ if (!unique_cell.is_null()) {
+ // Found a unique star export for this name.
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ DCHECK(exports->Lookup(export_name).IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, export_name, unique_cell);
+ module->set_exports(*exports);
+ return unique_cell;
+ }
+ }
+
+ // Unresolvable.
+ if (must_resolve) {
+ return isolate->Throw<Cell>(
+ isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
+ module_specifier, export_name),
+ &loc);
+ }
+ return MaybeHandle<Cell>();
+}
+
+bool SourceTextModule::PrepareInstantiate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ v8::Local<v8::Context> context, v8::Module::ResolveCallback callback) {
+ // Obtain requested modules.
+ Handle<SourceTextModuleInfo> module_info(module->info(), isolate);
+ Handle<FixedArray> module_requests(module_info->module_requests(), isolate);
+ Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+ for (int i = 0, length = module_requests->length(); i < length; ++i) {
+ Handle<String> specifier(String::cast(module_requests->get(i)), isolate);
+ v8::Local<v8::Module> api_requested_module;
+ if (!callback(context, v8::Utils::ToLocal(specifier),
+ v8::Utils::ToLocal(Handle<Module>::cast(module)))
+ .ToLocal(&api_requested_module)) {
+ isolate->PromoteScheduledException();
+ return false;
+ }
+ Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
+ requested_modules->set(i, *requested_module);
+ }
+
+ // Recurse.
+ for (int i = 0, length = requested_modules->length(); i < length; ++i) {
+ Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
+ isolate);
+ if (!Module::PrepareInstantiate(isolate, requested_module, context,
+ callback)) {
+ return false;
+ }
+ }
+
+ // Set up local exports.
+ // TODO(neis): Create regular_exports array here instead of in factory method?
+ for (int i = 0, n = module_info->RegularExportCount(); i < n; ++i) {
+ int cell_index = module_info->RegularExportCellIndex(i);
+ Handle<FixedArray> export_names(module_info->RegularExportExportNames(i),
+ isolate);
+ CreateExport(isolate, module, cell_index, export_names);
+ }
+
+ // Partially set up indirect exports.
+ // For each indirect export, we create the appropriate slot in the export
+ // table and store its SourceTextModuleInfoEntry there. When we later find
+ // the correct Cell in the module that actually provides the value, we replace
+ // the SourceTextModuleInfoEntry by that Cell (see ResolveExport).
+ Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ Handle<SourceTextModuleInfoEntry> entry(
+ SourceTextModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ Handle<Object> export_name(entry->export_name(), isolate);
+ if (export_name->IsUndefined(isolate)) continue; // Star export.
+ CreateIndirectExport(isolate, module, Handle<String>::cast(export_name),
+ entry);
+ }
+
+ DCHECK_EQ(module->status(), kPreInstantiating);
+ return true;
+}
+
+bool SourceTextModule::RunInitializationCode(Isolate* isolate,
+ Handle<SourceTextModule> module) {
+ DCHECK_EQ(module->status(), kInstantiating);
+ Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
+ DCHECK_EQ(MODULE_SCOPE, function->shared().scope_info().scope_type());
+ Handle<Object> receiver = isolate->factory()->undefined_value();
+ Handle<Object> argv[] = {module};
+ MaybeHandle<Object> maybe_generator =
+ Execution::Call(isolate, function, receiver, arraysize(argv), argv);
+ Handle<Object> generator;
+ if (!maybe_generator.ToHandle(&generator)) {
+ DCHECK(isolate->has_pending_exception());
+ return false;
+ }
+ DCHECK_EQ(*function, Handle<JSGeneratorObject>::cast(generator)->function());
+ module->set_code(*generator);
+ return true;
+}
+
+bool SourceTextModule::MaybeTransitionComponent(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, Status new_status) {
+ DCHECK(new_status == kInstantiated || new_status == kEvaluated);
+ SLOW_DCHECK(
+ // {module} is on the {stack}.
+ std::count_if(stack->begin(), stack->end(),
+ [&](Handle<Module> m) { return *m == *module; }) == 1);
+ DCHECK_LE(module->dfs_ancestor_index(), module->dfs_index());
+ if (module->dfs_ancestor_index() == module->dfs_index()) {
+ // This is the root of its strongly connected component.
+ Handle<SourceTextModule> ancestor;
+ do {
+ ancestor = stack->front();
+ stack->pop_front();
+ DCHECK_EQ(ancestor->status(),
+ new_status == kInstantiated ? kInstantiating : kEvaluating);
+ if (new_status == kInstantiated) {
+ if (!SourceTextModule::RunInitializationCode(isolate, ancestor))
+ return false;
+ }
+ ancestor->SetStatus(new_status);
+ } while (*ancestor != *module);
+ }
+ return true;
+}
+
+bool SourceTextModule::FinishInstantiate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index,
+ Zone* zone) {
+ // Instantiate SharedFunctionInfo and mark module as instantiating for
+ // the recursion.
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(module->code()),
+ isolate);
+ Handle<JSFunction> function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, isolate->native_context());
+ module->set_code(*function);
+ module->SetStatus(kInstantiating);
+ module->set_dfs_index(*dfs_index);
+ module->set_dfs_ancestor_index(*dfs_index);
+ stack->push_front(module);
+ (*dfs_index)++;
+
+ // Recurse.
+ Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+ for (int i = 0, length = requested_modules->length(); i < length; ++i) {
+ Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
+ isolate);
+ if (!Module::FinishInstantiate(isolate, requested_module, stack, dfs_index,
+ zone)) {
+ return false;
+ }
+
+ DCHECK_NE(requested_module->status(), kEvaluating);
+ DCHECK_GE(requested_module->status(), kInstantiating);
+ SLOW_DCHECK(
+ // {requested_module} is instantiating iff it's on the {stack}.
+ (requested_module->status() == kInstantiating) ==
+ std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
+ return *m == *requested_module;
+ }));
+
+ if (requested_module->status() == kInstantiating) {
+ // SyntheticModules go straight to kInstantiated so this must be a
+ // SourceTextModule
+ module->set_dfs_ancestor_index(
+ std::min(module->dfs_ancestor_index(),
+ Handle<SourceTextModule>::cast(requested_module)
+ ->dfs_ancestor_index()));
+ }
+ }
+
+ Handle<Script> script(module->script(), isolate);
+ Handle<SourceTextModuleInfo> module_info(module->info(), isolate);
+
+ // Resolve imports.
+ Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
+ for (int i = 0, n = regular_imports->length(); i < n; ++i) {
+ Handle<SourceTextModuleInfoEntry> entry(
+ SourceTextModuleInfoEntry::cast(regular_imports->get(i)), isolate);
+ Handle<String> name(String::cast(entry->import_name()), isolate);
+ MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
+ ResolveSet resolve_set(zone);
+ Handle<Cell> cell;
+ if (!ResolveImport(isolate, module, name, entry->module_request(), loc,
+ true, &resolve_set)
+ .ToHandle(&cell)) {
+ return false;
+ }
+ module->regular_imports().set(ImportIndex(entry->cell_index()), *cell);
+ }
+
+ // Resolve indirect exports.
+ Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ Handle<SourceTextModuleInfoEntry> entry(
+ SourceTextModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ Handle<Object> name(entry->export_name(), isolate);
+ if (name->IsUndefined(isolate)) continue; // Star export.
+ MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
+ ResolveSet resolve_set(zone);
+ if (ResolveExport(isolate, module, Handle<String>(),
+ Handle<String>::cast(name), loc, true, &resolve_set)
+ .is_null()) {
+ return false;
+ }
+ }
+
+ return MaybeTransitionComponent(isolate, module, stack, kInstantiated);
+}
+
+void SourceTextModule::FetchStarExports(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ Zone* zone,
+ UnorderedModuleSet* visited) {
+ DCHECK_GE(module->status(), Module::kInstantiating);
+
+ if (module->module_namespace().IsJSModuleNamespace()) return; // Shortcut.
+
+ bool cycle = !visited->insert(module).second;
+ if (cycle) return;
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ UnorderedStringMap more_exports(zone);
+
+ // TODO(neis): Only allocate more_exports if there are star exports.
+ // Maybe split special_exports into indirect_exports and star_exports.
+
+ ReadOnlyRoots roots(isolate);
+ Handle<FixedArray> special_exports(module->info().special_exports(), isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ Handle<SourceTextModuleInfoEntry> entry(
+ SourceTextModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ if (!entry->export_name().IsUndefined(roots)) {
+ continue; // Indirect export.
+ }
+
+ Handle<Module> requested_module(
+ Module::cast(module->requested_modules().get(entry->module_request())),
+ isolate);
+
+ // Recurse.
+ if (requested_module->IsSourceTextModule())
+ FetchStarExports(isolate,
+ Handle<SourceTextModule>::cast(requested_module), zone,
+ visited);
+
+ // Collect all of [requested_module]'s exports that must be added to
+ // [module]'s exports (i.e. to [exports]). We record these in
+ // [more_exports]. Ambiguities (conflicting exports) are marked by mapping
+ // the name to undefined instead of a Cell.
+ Handle<ObjectHashTable> requested_exports(requested_module->exports(),
+ isolate);
+ for (int i = 0, n = requested_exports->Capacity(); i < n; ++i) {
+ Object key;
+ if (!requested_exports->ToKey(roots, i, &key)) continue;
+ Handle<String> name(String::cast(key), isolate);
+
+ if (name->Equals(roots.default_string())) continue;
+ if (!exports->Lookup(name).IsTheHole(roots)) continue;
+
+ Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(i)), isolate);
+ auto insert_result = more_exports.insert(std::make_pair(name, cell));
+ if (!insert_result.second) {
+ auto it = insert_result.first;
+ if (*it->second == *cell || it->second->IsUndefined(roots)) {
+ // We already recorded this mapping before, or the name is already
+ // known to be ambiguous. In either case, there's nothing to do.
+ } else {
+ DCHECK(it->second->IsCell());
+ // Different star exports provide different cells for this name, hence
+ // mark the name as ambiguous.
+ it->second = roots.undefined_value_handle();
+ }
+ }
+ }
+ }
+
+ // Copy [more_exports] into [exports].
+ for (const auto& elem : more_exports) {
+ if (elem.second->IsUndefined(isolate)) continue; // Ambiguous export.
+ DCHECK(!elem.first->Equals(ReadOnlyRoots(isolate).default_string()));
+ DCHECK(elem.second->IsCell());
+ exports = ObjectHashTable::Put(exports, elem.first, elem.second);
+ }
+ module->set_exports(*exports);
+}
+
+Handle<JSModuleNamespace> SourceTextModule::GetModuleNamespace(
+ Isolate* isolate, Handle<SourceTextModule> module, int module_request) {
+ Handle<Module> requested_module(
+ Module::cast(module->requested_modules().get(module_request)), isolate);
+ return Module::GetModuleNamespace(isolate, requested_module);
+}
+
+MaybeHandle<Object> SourceTextModule::Evaluate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index) {
+ Handle<JSGeneratorObject> generator(JSGeneratorObject::cast(module->code()),
+ isolate);
+ module->set_code(
+ generator->function().shared().scope_info().ModuleDescriptorInfo());
+ module->SetStatus(kEvaluating);
+ module->set_dfs_index(*dfs_index);
+ module->set_dfs_ancestor_index(*dfs_index);
+ stack->push_front(module);
+ (*dfs_index)++;
+
+ // Recursion.
+ Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+ for (int i = 0, length = requested_modules->length(); i < length; ++i) {
+ Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
+ isolate);
+ RETURN_ON_EXCEPTION(
+ isolate, Module::Evaluate(isolate, requested_module, stack, dfs_index),
+ Object);
+
+ DCHECK_GE(requested_module->status(), kEvaluating);
+ DCHECK_NE(requested_module->status(), kErrored);
+ SLOW_DCHECK(
+ // {requested_module} is evaluating iff it's on the {stack}.
+ (requested_module->status() == kEvaluating) ==
+ std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
+ return *m == *requested_module;
+ }));
+
+ if (requested_module->status() == kEvaluating) {
+ // SyntheticModules go straight to kEvaluated so this must be a
+ // SourceTextModule
+ module->set_dfs_ancestor_index(
+ std::min(module->dfs_ancestor_index(),
+ Handle<SourceTextModule>::cast(requested_module)
+ ->dfs_ancestor_index()));
+ }
+ }
+
+ // Evaluation of module body.
+ Handle<JSFunction> resume(
+ isolate->native_context()->generator_next_internal(), isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr),
+ Object);
+ DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate));
+
+ CHECK(MaybeTransitionComponent(isolate, module, stack, kEvaluated));
+ return handle(JSIteratorResult::cast(*result).value(), isolate);
+}
+
+void SourceTextModule::Reset(Isolate* isolate,
+ Handle<SourceTextModule> module) {
+ Factory* factory = isolate->factory();
+
+ DCHECK(module->import_meta().IsTheHole(isolate));
+
+ Handle<FixedArray> regular_exports =
+ factory->NewFixedArray(module->regular_exports().length());
+ Handle<FixedArray> regular_imports =
+ factory->NewFixedArray(module->regular_imports().length());
+ Handle<FixedArray> requested_modules =
+ factory->NewFixedArray(module->requested_modules().length());
+
+ if (module->status() == kInstantiating) {
+ module->set_code(JSFunction::cast(module->code()).shared());
+ }
+ module->set_regular_exports(*regular_exports);
+ module->set_regular_imports(*regular_imports);
+ module->set_requested_modules(*requested_modules);
+ module->set_dfs_index(-1);
+ module->set_dfs_ancestor_index(-1);
+}
+
+} // namespace internal
+} // namespace v8
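
FinishInstantiate and Evaluate above drive a Tarjan-style strongly connected component walk over the module graph: each module gets a dfs_index, inherits the minimum dfs_ancestor_index of requested modules still on the stack, and MaybeTransitionComponent pops and transitions an entire component once its root (dfs_ancestor_index == dfs_index) is reached. A minimal standalone sketch of that control flow over a toy dependency graph, with all the Handle/Isolate machinery elided:

#include <algorithm>
#include <deque>
#include <iostream>
#include <vector>

// Toy module record: just enough state to mirror dfs_index,
// dfs_ancestor_index and status from the real SourceTextModule.
struct ToyModule {
  std::vector<int> requests;  // indices of requested modules
  int dfs_index = -1;
  int dfs_ancestor_index = -1;
  enum Status { kUninstantiated, kInstantiating, kInstantiated } status = kUninstantiated;
};

void MaybeTransitionComponent(std::vector<ToyModule>& modules,
                              std::deque<int>& stack, int m) {
  // Root of a strongly connected component: pop the whole component and
  // transition every member, as in SourceTextModule::MaybeTransitionComponent.
  if (modules[m].dfs_ancestor_index != modules[m].dfs_index) return;
  int ancestor;
  do {
    ancestor = stack.front();
    stack.pop_front();
    modules[ancestor].status = ToyModule::kInstantiated;
  } while (ancestor != m);
}

void FinishInstantiate(std::vector<ToyModule>& modules, std::deque<int>& stack,
                       unsigned& dfs_index, int m) {
  modules[m].status = ToyModule::kInstantiating;
  modules[m].dfs_index = modules[m].dfs_ancestor_index = static_cast<int>(dfs_index++);
  stack.push_front(m);

  for (int requested : modules[m].requests) {
    if (modules[requested].status == ToyModule::kUninstantiated) {
      FinishInstantiate(modules, stack, dfs_index, requested);
    }
    // A requested module still on the stack means we are inside a cycle:
    // propagate its ancestor index, exactly like the real FinishInstantiate.
    if (modules[requested].status == ToyModule::kInstantiating) {
      modules[m].dfs_ancestor_index = std::min(
          modules[m].dfs_ancestor_index, modules[requested].dfs_ancestor_index);
    }
  }
  MaybeTransitionComponent(modules, stack, m);
}

int main() {
  // Modules 0 -> 1 -> 2 -> 0 form a cycle; 3 is a leaf imported by 2.
  std::vector<ToyModule> modules(4);
  modules[0].requests = {1};
  modules[1].requests = {2};
  modules[2].requests = {0, 3};
  std::deque<int> stack;
  unsigned dfs_index = 0;
  FinishInstantiate(modules, stack, dfs_index, 0);
  for (const ToyModule& mod : modules)
    std::cout << (mod.status == ToyModule::kInstantiated) << "\n";  // prints 1 four times
}
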
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
new file mode 100644
index 0000000000..5c20b7018b
--- /dev/null
+++ b/deps/v8/src/objects/source-text-module.h
@@ -0,0 +1,220 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SOURCE_TEXT_MODULE_H_
+#define V8_OBJECTS_SOURCE_TEXT_MODULE_H_
+
+#include "src/objects/module.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class UnorderedModuleSet;
+
+// The runtime representation of an ECMAScript Source Text Module Record.
+// https://tc39.github.io/ecma262/#sec-source-text-module-records
+class SourceTextModule : public Module {
+ public:
+ NEVER_READ_ONLY_SPACE
+ DECL_CAST(SourceTextModule)
+ DECL_VERIFIER(SourceTextModule)
+ DECL_PRINTER(SourceTextModule)
+
+ // The code representing this module, or an abstraction thereof.
+ // This is either a SharedFunctionInfo, a JSFunction, a JSGeneratorObject, or
+ // a SourceTextModuleInfo, depending on the state (status) the module is in.
+ // See SourceTextModule::SourceTextModuleVerify() for the precise invariant.
+ DECL_ACCESSORS(code, Object)
+
+ // Arrays of cells corresponding to regular exports and regular imports.
+ // A cell's position in the array is determined by the cell index of the
+ // associated module entry (which coincides with the variable index of the
+ // associated variable).
+ DECL_ACCESSORS(regular_exports, FixedArray)
+ DECL_ACCESSORS(regular_imports, FixedArray)
+
+ // The shared function info in case {status} is not kEvaluating, kEvaluated or
+ // kErrored.
+ SharedFunctionInfo GetSharedFunctionInfo() const;
+
+ // Modules imported or re-exported by this module.
+ // Corresponds 1-to-1 to the module specifier strings in
+ // SourceTextModuleInfo::module_requests.
+ DECL_ACCESSORS(requested_modules, FixedArray)
+
+ // [script]: Script from which the module originates.
+ DECL_ACCESSORS(script, Script)
+
+ // The value of import.meta inside of this module.
+ // Lazily initialized on first access. It's the hole before first access and
+ // a JSObject afterwards.
+ DECL_ACCESSORS(import_meta, Object)
+
+ // Get the SourceTextModuleInfo associated with the code.
+ inline SourceTextModuleInfo info() const;
+
+ Cell GetCell(int cell_index);
+ static Handle<Object> LoadVariable(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ int cell_index);
+ static void StoreVariable(Handle<SourceTextModule> module, int cell_index,
+ Handle<Object> value);
+
+ static int ImportIndex(int cell_index);
+ static int ExportIndex(int cell_index);
+
+ // Get the namespace object for [module_request] of [module]. If it doesn't
+ // exist yet, it is created.
+ static Handle<JSModuleNamespace> GetModuleNamespace(
+ Isolate* isolate, Handle<SourceTextModule> module, int module_request);
+
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(Module::kHeaderSize,
+ TORQUE_GENERATED_SOURCE_TEXT_MODULE_FIELDS)
+
+ using BodyDescriptor =
+ SubclassBodyDescriptor<Module::BodyDescriptor,
+ FixedBodyDescriptor<kCodeOffset, kSize, kSize>>;
+
+ private:
+ friend class Factory;
+ friend class Module;
+
+ // TODO(neis): Don't store those in the module object?
+ DECL_INT_ACCESSORS(dfs_index)
+ DECL_INT_ACCESSORS(dfs_ancestor_index)
+
+ // Helpers for Instantiate and Evaluate.
+
+ static void CreateExport(Isolate* isolate, Handle<SourceTextModule> module,
+ int cell_index, Handle<FixedArray> names);
+ static void CreateIndirectExport(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ Handle<String> name,
+ Handle<SourceTextModuleInfoEntry> entry);
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExport(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveImport(
+ Isolate* isolate, Handle<SourceTextModule> module, Handle<String> name,
+ int module_request, MessageLocation loc, bool must_resolve,
+ ResolveSet* resolve_set);
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
+
+ static V8_WARN_UNUSED_RESULT bool PrepareInstantiate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ v8::Local<v8::Context> context, v8::Module::ResolveCallback callback);
+ static V8_WARN_UNUSED_RESULT bool FinishInstantiate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index,
+ Zone* zone);
+ static V8_WARN_UNUSED_RESULT bool RunInitializationCode(
+ Isolate* isolate, Handle<SourceTextModule> module);
+
+ static void FetchStarExports(Isolate* isolate,
+ Handle<SourceTextModule> module, Zone* zone,
+ UnorderedModuleSet* visited);
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index);
+
+ static V8_WARN_UNUSED_RESULT bool MaybeTransitionComponent(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, Status new_status);
+
+ static void Reset(Isolate* isolate, Handle<SourceTextModule> module);
+
+ OBJECT_CONSTRUCTORS(SourceTextModule, Module);
+};
+
+// SourceTextModuleInfo is to SourceTextModuleDescriptor what ScopeInfo is to
+// Scope.
+class SourceTextModuleInfo : public FixedArray {
+ public:
+ DECL_CAST(SourceTextModuleInfo)
+
+ static Handle<SourceTextModuleInfo> New(Isolate* isolate, Zone* zone,
+ SourceTextModuleDescriptor* descr);
+
+ inline FixedArray module_requests() const;
+ inline FixedArray special_exports() const;
+ inline FixedArray regular_exports() const;
+ inline FixedArray regular_imports() const;
+ inline FixedArray namespace_imports() const;
+ inline FixedArray module_request_positions() const;
+
+ // Accessors for [regular_exports].
+ int RegularExportCount() const;
+ String RegularExportLocalName(int i) const;
+ int RegularExportCellIndex(int i) const;
+ FixedArray RegularExportExportNames(int i) const;
+
+#ifdef DEBUG
+ inline bool Equals(SourceTextModuleInfo other) const;
+#endif
+
+ private:
+ friend class Factory;
+ friend class SourceTextModuleDescriptor;
+ enum {
+ kModuleRequestsIndex,
+ kSpecialExportsIndex,
+ kRegularExportsIndex,
+ kNamespaceImportsIndex,
+ kRegularImportsIndex,
+ kModuleRequestPositionsIndex,
+ kLength
+ };
+ enum {
+ kRegularExportLocalNameOffset,
+ kRegularExportCellIndexOffset,
+ kRegularExportExportNamesOffset,
+ kRegularExportLength
+ };
+
+ OBJECT_CONSTRUCTORS(SourceTextModuleInfo, FixedArray);
+};
+
+class SourceTextModuleInfoEntry : public Struct {
+ public:
+ DECL_CAST(SourceTextModuleInfoEntry)
+ DECL_PRINTER(SourceTextModuleInfoEntry)
+ DECL_VERIFIER(SourceTextModuleInfoEntry)
+
+ DECL_ACCESSORS(export_name, Object)
+ DECL_ACCESSORS(local_name, Object)
+ DECL_ACCESSORS(import_name, Object)
+ DECL_INT_ACCESSORS(module_request)
+ DECL_INT_ACCESSORS(cell_index)
+ DECL_INT_ACCESSORS(beg_pos)
+ DECL_INT_ACCESSORS(end_pos)
+
+ static Handle<SourceTextModuleInfoEntry> New(
+ Isolate* isolate, Handle<Object> export_name, Handle<Object> local_name,
+ Handle<Object> import_name, int module_request, int cell_index,
+ int beg_pos, int end_pos);
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ Struct::kHeaderSize,
+ TORQUE_GENERATED_SOURCE_TEXT_MODULE_INFO_ENTRY_FIELDS)
+
+ OBJECT_CONSTRUCTORS(SourceTextModuleInfoEntry, Struct);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SOURCE_TEXT_MODULE_H_
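
The cell_index convention behind ImportIndex/ExportIndex (declared above and defined in source-text-module.cc) appears to encode the kind in the sign of the index: exports use positive indices, imports use negative ones, and 0 is invalid, so both map onto compact zero-based slots in regular_exports and regular_imports. A small hedged sketch, with GetCellIndexKind standing in for the real SourceTextModuleDescriptor helper:

#include <cassert>

enum class CellIndexKind { kInvalid, kExport, kImport };

// Stand-in for SourceTextModuleDescriptor::GetCellIndexKind: the sign of the
// cell index distinguishes exports, imports and the invalid value 0.
CellIndexKind GetCellIndexKind(int cell_index) {
  if (cell_index > 0) return CellIndexKind::kExport;
  if (cell_index < 0) return CellIndexKind::kImport;
  return CellIndexKind::kInvalid;
}

// Mirrors SourceTextModule::ExportIndex / ImportIndex from the diff above.
int ExportIndex(int cell_index) {
  assert(GetCellIndexKind(cell_index) == CellIndexKind::kExport);
  return cell_index - 1;
}
int ImportIndex(int cell_index) {
  assert(GetCellIndexKind(cell_index) == CellIndexKind::kImport);
  return -cell_index - 1;
}

int main() {
  assert(ExportIndex(1) == 0 && ExportIndex(3) == 2);    // slots in regular_exports
  assert(ImportIndex(-1) == 0 && ImportIndex(-3) == 2);  // slots in regular_imports
  return 0;
}
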
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index 8069e6e5c9..e72af4df94 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -32,11 +32,15 @@ ACCESSORS(StackFrameInfo, script_name, Object, kScriptNameOffset)
ACCESSORS(StackFrameInfo, script_name_or_source_url, Object,
kScriptNameOrSourceUrlOffset)
ACCESSORS(StackFrameInfo, function_name, Object, kFunctionNameOffset)
+ACCESSORS(StackFrameInfo, method_name, Object, kMethodNameOffset)
+ACCESSORS(StackFrameInfo, type_name, Object, kTypeNameOffset)
+ACCESSORS(StackFrameInfo, eval_origin, Object, kEvalOriginOffset)
ACCESSORS(StackFrameInfo, wasm_module_name, Object, kWasmModuleNameOffset)
SMI_ACCESSORS(StackFrameInfo, flag, kFlagOffset)
BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, kIsEvalBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, kIsConstructorBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, kIsWasmBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_asmjs_wasm, kIsAsmJsWasmBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_user_java_script, kIsUserJavaScriptBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_toplevel, kIsToplevelBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_async, kIsAsyncBit)
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index f427d7eae2..558449d85a 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -5,85 +5,144 @@
#include "src/objects/stack-frame-info.h"
#include "src/objects/stack-frame-info-inl.h"
+#include "src/strings/string-builder-inl.h"
namespace v8 {
namespace internal {
+// static
int StackTraceFrame::GetLineNumber(Handle<StackTraceFrame> frame) {
int line = GetFrameInfo(frame)->line_number();
return line != StackFrameBase::kNone ? line : Message::kNoLineNumberInfo;
}
+// static
+int StackTraceFrame::GetOneBasedLineNumber(Handle<StackTraceFrame> frame) {
+ // JavaScript line numbers are already 1-based. Wasm line numbers need
+ // to be adjusted.
+ int line = StackTraceFrame::GetLineNumber(frame);
+ if (StackTraceFrame::IsWasm(frame) && line >= 0) line++;
+ return line;
+}
+
+// static
int StackTraceFrame::GetColumnNumber(Handle<StackTraceFrame> frame) {
int column = GetFrameInfo(frame)->column_number();
return column != StackFrameBase::kNone ? column : Message::kNoColumnInfo;
}
+// static
+int StackTraceFrame::GetOneBasedColumnNumber(Handle<StackTraceFrame> frame) {
+ // JavaScript column numbers are already 1-based. Wasm column numbers need
+ // to be adjusted.
+ int column = StackTraceFrame::GetColumnNumber(frame);
+ if (StackTraceFrame::IsWasm(frame) && column >= 0) column++;
+ return column;
+}
+
+// static
int StackTraceFrame::GetScriptId(Handle<StackTraceFrame> frame) {
int id = GetFrameInfo(frame)->script_id();
return id != StackFrameBase::kNone ? id : Message::kNoScriptIdInfo;
}
+// static
int StackTraceFrame::GetPromiseAllIndex(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->promise_all_index();
}
+// static
Handle<Object> StackTraceFrame::GetFileName(Handle<StackTraceFrame> frame) {
auto name = GetFrameInfo(frame)->script_name();
return handle(name, frame->GetIsolate());
}
+// static
Handle<Object> StackTraceFrame::GetScriptNameOrSourceUrl(
Handle<StackTraceFrame> frame) {
auto name = GetFrameInfo(frame)->script_name_or_source_url();
return handle(name, frame->GetIsolate());
}
+// static
Handle<Object> StackTraceFrame::GetFunctionName(Handle<StackTraceFrame> frame) {
auto name = GetFrameInfo(frame)->function_name();
return handle(name, frame->GetIsolate());
}
+// static
+Handle<Object> StackTraceFrame::GetMethodName(Handle<StackTraceFrame> frame) {
+ auto name = GetFrameInfo(frame)->method_name();
+ return handle(name, frame->GetIsolate());
+}
+
+// static
+Handle<Object> StackTraceFrame::GetTypeName(Handle<StackTraceFrame> frame) {
+ auto name = GetFrameInfo(frame)->type_name();
+ return handle(name, frame->GetIsolate());
+}
+
+// static
+Handle<Object> StackTraceFrame::GetEvalOrigin(Handle<StackTraceFrame> frame) {
+ auto origin = GetFrameInfo(frame)->eval_origin();
+ return handle(origin, frame->GetIsolate());
+}
+
+// static
Handle<Object> StackTraceFrame::GetWasmModuleName(
Handle<StackTraceFrame> frame) {
auto module = GetFrameInfo(frame)->wasm_module_name();
return handle(module, frame->GetIsolate());
}
+// static
bool StackTraceFrame::IsEval(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_eval();
}
+// static
bool StackTraceFrame::IsConstructor(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_constructor();
}
+// static
bool StackTraceFrame::IsWasm(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_wasm();
}
+// static
+bool StackTraceFrame::IsAsmJsWasm(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_asmjs_wasm();
+}
+
+// static
bool StackTraceFrame::IsUserJavaScript(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_user_java_script();
}
+// static
bool StackTraceFrame::IsToplevel(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_toplevel();
}
+// static
bool StackTraceFrame::IsAsync(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_async();
}
+// static
bool StackTraceFrame::IsPromiseAll(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_promise_all();
}
+// static
Handle<StackFrameInfo> StackTraceFrame::GetFrameInfo(
Handle<StackTraceFrame> frame) {
if (frame->frame_info().IsUndefined()) InitializeFrameInfo(frame);
return handle(StackFrameInfo::cast(frame->frame_info()), frame->GetIsolate());
}
+// static
void StackTraceFrame::InitializeFrameInfo(Handle<StackTraceFrame> frame) {
Isolate* isolate = frame->GetIsolate();
Handle<StackFrameInfo> frame_info = isolate->factory()->NewStackFrameInfo(
@@ -97,5 +156,259 @@ void StackTraceFrame::InitializeFrameInfo(Handle<StackTraceFrame> frame) {
frame->set_frame_index(-1);
}
+Handle<FrameArray> GetFrameArrayFromStackTrace(Isolate* isolate,
+ Handle<FixedArray> stack_trace) {
+ // For the empty case, an empty FrameArray needs to be allocated so the rest
+ // of the code doesn't have to be special-cased everywhere.
+ if (stack_trace->length() == 0) {
+ return isolate->factory()->NewFrameArray(0);
+ }
+
+ // Retrieve the FrameArray from the first StackTraceFrame.
+ DCHECK_GT(stack_trace->length(), 0);
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(stack_trace->get(0)),
+ isolate);
+ return handle(FrameArray::cast(frame->frame_array()), isolate);
+}
+
+namespace {
+
+bool IsNonEmptyString(Handle<Object> object) {
+ return (object->IsString() && String::cast(*object).length() > 0);
+}
+
+void AppendFileLocation(Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder* builder) {
+ Handle<Object> file_name = StackTraceFrame::GetScriptNameOrSourceUrl(frame);
+ if (!file_name->IsString() && StackTraceFrame::IsEval(frame)) {
+ Handle<Object> eval_origin = StackTraceFrame::GetEvalOrigin(frame);
+ DCHECK(eval_origin->IsString());
+ builder->AppendString(Handle<String>::cast(eval_origin));
+ builder->AppendCString(", "); // Expecting source position to follow.
+ }
+
+ if (IsNonEmptyString(file_name)) {
+ builder->AppendString(Handle<String>::cast(file_name));
+ } else {
+ // Source code does not originate from a file and is not native, but we
+ // can still get the source position inside the source string, e.g. in
+ // an eval string.
+ builder->AppendCString("<anonymous>");
+ }
+
+ int line_number = StackTraceFrame::GetLineNumber(frame);
+ if (line_number != Message::kNoLineNumberInfo) {
+ builder->AppendCharacter(':');
+ builder->AppendInt(line_number);
+
+ int column_number = StackTraceFrame::GetColumnNumber(frame);
+ if (column_number != Message::kNoColumnInfo) {
+ builder->AppendCharacter(':');
+ builder->AppendInt(column_number);
+ }
+ }
+}
+
+int StringIndexOf(Isolate* isolate, Handle<String> subject,
+ Handle<String> pattern) {
+ if (pattern->length() > subject->length()) return -1;
+ return String::IndexOf(isolate, subject, pattern, 0);
+}
+
+// Returns true iff
+// 1. the subject ends with '.' + pattern, or
+// 2. subject == pattern.
+bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
+ Handle<String> pattern) {
+ if (String::Equals(isolate, subject, pattern)) return true;
+
+ FlatStringReader subject_reader(isolate, String::Flatten(isolate, subject));
+ FlatStringReader pattern_reader(isolate, String::Flatten(isolate, pattern));
+
+ int pattern_index = pattern_reader.length() - 1;
+ int subject_index = subject_reader.length() - 1;
+ for (int i = 0; i <= pattern_reader.length(); i++) { // Iterate over len + 1.
+ if (subject_index < 0) {
+ return false;
+ }
+
+ const uc32 subject_char = subject_reader.Get(subject_index);
+ if (i == pattern_reader.length()) {
+ if (subject_char != '.') return false;
+ } else if (subject_char != pattern_reader.Get(pattern_index)) {
+ return false;
+ }
+
+ pattern_index--;
+ subject_index--;
+ }
+
+ return true;
+}
+
+void AppendMethodCall(Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder* builder) {
+ Handle<Object> type_name = StackTraceFrame::GetTypeName(frame);
+ Handle<Object> method_name = StackTraceFrame::GetMethodName(frame);
+ Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
+
+ if (IsNonEmptyString(function_name)) {
+ Handle<String> function_string = Handle<String>::cast(function_name);
+ if (IsNonEmptyString(type_name)) {
+ Handle<String> type_string = Handle<String>::cast(type_name);
+ bool starts_with_type_name =
+ (StringIndexOf(isolate, function_string, type_string) == 0);
+ if (!starts_with_type_name) {
+ builder->AppendString(type_string);
+ builder->AppendCharacter('.');
+ }
+ }
+ builder->AppendString(function_string);
+
+ if (IsNonEmptyString(method_name)) {
+ Handle<String> method_string = Handle<String>::cast(method_name);
+ if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
+ builder->AppendCString(" [as ");
+ builder->AppendString(method_string);
+ builder->AppendCharacter(']');
+ }
+ }
+ } else {
+ if (IsNonEmptyString(type_name)) {
+ builder->AppendString(Handle<String>::cast(type_name));
+ builder->AppendCharacter('.');
+ }
+ if (IsNonEmptyString(method_name)) {
+ builder->AppendString(Handle<String>::cast(method_name));
+ } else {
+ builder->AppendCString("<anonymous>");
+ }
+ }
+}
+
+void SerializeJSStackFrame(
+ Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder& builder // NOLINT(runtime/references)
+) {
+ Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
+
+ const bool is_toplevel = StackTraceFrame::IsToplevel(frame);
+ const bool is_async = StackTraceFrame::IsAsync(frame);
+ const bool is_promise_all = StackTraceFrame::IsPromiseAll(frame);
+ const bool is_constructor = StackTraceFrame::IsConstructor(frame);
+ // Note: Keep the {is_method_call} predicate in sync with the corresponding
+ // predicate in factory.cc where the StackFrameInfo is created.
+ // Otherwise necessary fields for serializing this frame might be
+ // missing.
+ const bool is_method_call = !(is_toplevel || is_constructor);
+
+ if (is_async) {
+ builder.AppendCString("async ");
+ }
+ if (is_promise_all) {
+ builder.AppendCString("Promise.all (index ");
+ builder.AppendInt(StackTraceFrame::GetPromiseAllIndex(frame));
+ builder.AppendCString(")");
+ return;
+ }
+ if (is_method_call) {
+ AppendMethodCall(isolate, frame, &builder);
+ } else if (is_constructor) {
+ builder.AppendCString("new ");
+ if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ } else {
+ builder.AppendCString("<anonymous>");
+ }
+ } else if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ } else {
+ AppendFileLocation(isolate, frame, &builder);
+ return;
+ }
+
+ builder.AppendCString(" (");
+ AppendFileLocation(isolate, frame, &builder);
+ builder.AppendCString(")");
+}
+
+void SerializeAsmJsWasmStackFrame(
+ Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder& builder // NOLINT(runtime/references)
+) {
+ // The string should look exactly like the respective JavaScript frame string.
+ // Keep this method in line with
+ // JSStackFrame::ToString(IncrementalStringBuilder&).
+ Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
+
+ if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ builder.AppendCString(" (");
+ }
+
+ AppendFileLocation(isolate, frame, &builder);
+
+ if (IsNonEmptyString(function_name)) builder.AppendCString(")");
+
+ return;
+}
+
+void SerializeWasmStackFrame(
+ Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder& builder // NOLINT(runtime/references)
+) {
+ Handle<Object> module_name = StackTraceFrame::GetWasmModuleName(frame);
+ Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
+ const bool has_name = !module_name->IsNull() || !function_name->IsNull();
+ if (has_name) {
+ if (module_name->IsNull()) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ } else {
+ builder.AppendString(Handle<String>::cast(module_name));
+ if (!function_name->IsNull()) {
+ builder.AppendCString(".");
+ builder.AppendString(Handle<String>::cast(function_name));
+ }
+ }
+ builder.AppendCString(" (");
+ }
+
+ const int wasm_func_index = StackTraceFrame::GetLineNumber(frame);
+
+ builder.AppendCString("wasm-function[");
+ builder.AppendInt(wasm_func_index);
+ builder.AppendCString("]:");
+
+ char buffer[16];
+ SNPrintF(ArrayVector(buffer), "0x%x",
+ StackTraceFrame::GetColumnNumber(frame));
+ builder.AppendCString(buffer);
+
+ if (has_name) builder.AppendCString(")");
+}
+
+} // namespace
+
+void SerializeStackTraceFrame(
+ Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder& builder // NOLINT(runtime/references)
+) {
+ // Ordering here is important, as AsmJs frames are also marked as Wasm.
+ if (StackTraceFrame::IsAsmJsWasm(frame)) {
+ SerializeAsmJsWasmStackFrame(isolate, frame, builder);
+ } else if (StackTraceFrame::IsWasm(frame)) {
+ SerializeWasmStackFrame(isolate, frame, builder);
+ } else {
+ SerializeJSStackFrame(isolate, frame, builder);
+ }
+}
+
+MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate,
+ Handle<StackTraceFrame> frame) {
+ IncrementalStringBuilder builder(isolate);
+ SerializeStackTraceFrame(isolate, frame, builder);
+ return builder.Finish();
+}
+
} // namespace internal
} // namespace v8
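
Taken together, SerializeJSStackFrame, SerializeAsmJsWasmStackFrame and SerializeWasmStackFrame reproduce the familiar frame strings from Error.prototype.stack. A rough illustration of the shapes they emit, using plain std::string instead of IncrementalStringBuilder and hypothetical field values:

#include <cstdio>
#include <string>

// Hypothetical, simplified stand-ins for the serializers above; the real code
// draws these fields from StackFrameInfo and appends via IncrementalStringBuilder.
std::string SerializeJSFrame(const std::string& function_name,
                             const std::string& file, int line, int column,
                             bool is_async, bool is_constructor) {
  std::string out;
  if (is_async) out += "async ";
  if (is_constructor) out += "new ";
  out += function_name.empty() ? "<anonymous>" : function_name;
  out += " (" + file + ":" + std::to_string(line) + ":" + std::to_string(column) + ")";
  return out;
}

std::string SerializeWasmFrame(const std::string& module_name,
                               const std::string& function_name,
                               int func_index, unsigned byte_offset) {
  char position[32];
  std::snprintf(position, sizeof(position), "wasm-function[%d]:0x%x", func_index,
                byte_offset);
  return module_name + "." + function_name + " (" + position + ")";
}

int main() {
  // e.g. "new Foo (app.js:10:3)" and "mod.bar (wasm-function[2]:0x1a)"
  std::printf("%s\n", SerializeJSFrame("Foo", "app.js", 10, 3, false, true).c_str());
  std::printf("%s\n", SerializeWasmFrame("mod", "bar", 2, 0x1a).c_str());
  return 0;
}
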
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index 44826f67e6..3d91c5374f 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -25,10 +25,14 @@ class StackFrameInfo : public Struct {
DECL_ACCESSORS(script_name, Object)
DECL_ACCESSORS(script_name_or_source_url, Object)
DECL_ACCESSORS(function_name, Object)
+ DECL_ACCESSORS(method_name, Object)
+ DECL_ACCESSORS(type_name, Object)
+ DECL_ACCESSORS(eval_origin, Object)
DECL_ACCESSORS(wasm_module_name, Object)
DECL_BOOLEAN_ACCESSORS(is_eval)
DECL_BOOLEAN_ACCESSORS(is_constructor)
DECL_BOOLEAN_ACCESSORS(is_wasm)
+ DECL_BOOLEAN_ACCESSORS(is_asmjs_wasm)
DECL_BOOLEAN_ACCESSORS(is_user_java_script)
DECL_BOOLEAN_ACCESSORS(is_toplevel)
DECL_BOOLEAN_ACCESSORS(is_async)
@@ -49,10 +53,11 @@ class StackFrameInfo : public Struct {
static const int kIsEvalBit = 0;
static const int kIsConstructorBit = 1;
static const int kIsWasmBit = 2;
- static const int kIsUserJavaScriptBit = 3;
- static const int kIsToplevelBit = 4;
- static const int kIsAsyncBit = 5;
- static const int kIsPromiseAllBit = 6;
+ static const int kIsAsmJsWasmBit = 3;
+ static const int kIsUserJavaScriptBit = 4;
+ static const int kIsToplevelBit = 5;
+ static const int kIsAsyncBit = 6;
+ static const int kIsPromiseAllBit = 7;
OBJECT_CONSTRUCTORS(StackFrameInfo, Struct);
};
@@ -80,18 +85,24 @@ class StackTraceFrame : public Struct {
TORQUE_GENERATED_STACK_TRACE_FRAME_FIELDS)
static int GetLineNumber(Handle<StackTraceFrame> frame);
+ static int GetOneBasedLineNumber(Handle<StackTraceFrame> frame);
static int GetColumnNumber(Handle<StackTraceFrame> frame);
+ static int GetOneBasedColumnNumber(Handle<StackTraceFrame> frame);
static int GetScriptId(Handle<StackTraceFrame> frame);
static int GetPromiseAllIndex(Handle<StackTraceFrame> frame);
static Handle<Object> GetFileName(Handle<StackTraceFrame> frame);
static Handle<Object> GetScriptNameOrSourceUrl(Handle<StackTraceFrame> frame);
static Handle<Object> GetFunctionName(Handle<StackTraceFrame> frame);
+ static Handle<Object> GetMethodName(Handle<StackTraceFrame> frame);
+ static Handle<Object> GetTypeName(Handle<StackTraceFrame> frame);
+ static Handle<Object> GetEvalOrigin(Handle<StackTraceFrame> frame);
static Handle<Object> GetWasmModuleName(Handle<StackTraceFrame> frame);
static bool IsEval(Handle<StackTraceFrame> frame);
static bool IsConstructor(Handle<StackTraceFrame> frame);
static bool IsWasm(Handle<StackTraceFrame> frame);
+ static bool IsAsmJsWasm(Handle<StackTraceFrame> frame);
static bool IsUserJavaScript(Handle<StackTraceFrame> frame);
static bool IsToplevel(Handle<StackTraceFrame> frame);
static bool IsAsync(Handle<StackTraceFrame> frame);
@@ -104,6 +115,22 @@ class StackTraceFrame : public Struct {
static void InitializeFrameInfo(Handle<StackTraceFrame> frame);
};
+// Small helper that retrieves the FrameArray from a stack-trace
+// consisting of a FixedArray of StackTraceFrame objects.
+// This helper is only temporary until all FrameArray use-sites have
+// been converted to use StackTraceFrame and StackFrameInfo objects.
+V8_EXPORT_PRIVATE
+Handle<FrameArray> GetFrameArrayFromStackTrace(Isolate* isolate,
+ Handle<FixedArray> stack_trace);
+
+class IncrementalStringBuilder;
+void SerializeStackTraceFrame(
+ Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder& builder // NOLINT(runtime/references)
+);
+MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate,
+ Handle<StackTraceFrame> frame);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 0d8f83ca86..db724e0cf1 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -22,8 +22,6 @@
namespace v8 {
namespace internal {
-INT32_ACCESSORS(String, length, kLengthOffset)
-
int String::synchronized_length() const {
return base::AsAtomic32::Acquire_Load(
reinterpret_cast<const int32_t*>(FIELD_ADDR(*this, kLengthOffset)));
@@ -34,29 +32,21 @@ void String::synchronized_set_length(int value) {
reinterpret_cast<int32_t*>(FIELD_ADDR(*this, kLengthOffset)), value);
}
-OBJECT_CONSTRUCTORS_IMPL(String, Name)
-OBJECT_CONSTRUCTORS_IMPL(SeqString, String)
-OBJECT_CONSTRUCTORS_IMPL(SeqOneByteString, SeqString)
-OBJECT_CONSTRUCTORS_IMPL(SeqTwoByteString, SeqString)
-OBJECT_CONSTRUCTORS_IMPL(InternalizedString, String)
-OBJECT_CONSTRUCTORS_IMPL(ConsString, String)
-OBJECT_CONSTRUCTORS_IMPL(ThinString, String)
-OBJECT_CONSTRUCTORS_IMPL(SlicedString, String)
+TQ_OBJECT_CONSTRUCTORS_IMPL(String)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SeqString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SeqOneByteString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SeqTwoByteString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(InternalizedString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ConsString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ThinString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SlicedString)
OBJECT_CONSTRUCTORS_IMPL(ExternalString, String)
OBJECT_CONSTRUCTORS_IMPL(ExternalOneByteString, ExternalString)
OBJECT_CONSTRUCTORS_IMPL(ExternalTwoByteString, ExternalString)
-CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ExternalOneByteString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalTwoByteString)
-CAST_ACCESSOR(InternalizedString)
-CAST_ACCESSOR(SeqOneByteString)
-CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqTwoByteString)
-CAST_ACCESSOR(SlicedString)
-CAST_ACCESSOR(String)
-CAST_ACCESSOR(ThinString)
StringShape::StringShape(const String str) : type_(str.map().instance_type()) {
set_valid();
@@ -147,16 +137,17 @@ STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
-bool String::IsOneByteRepresentation() const {
- uint32_t type = map().instance_type();
+DEF_GETTER(String, IsOneByteRepresentation, bool) {
+ uint32_t type = map(isolate).instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
-bool String::IsTwoByteRepresentation() const {
- uint32_t type = map().instance_type();
+DEF_GETTER(String, IsTwoByteRepresentation, bool) {
+ uint32_t type = map(isolate).instance_type();
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
+// static
bool String::IsOneByteRepresentationUnderneath(String string) {
while (true) {
uint32_t type = string.map().instance_type();
@@ -398,7 +389,7 @@ String String::GetUnderlying() {
STATIC_ASSERT(static_cast<int>(ConsString::kFirstOffset) ==
static_cast<int>(ThinString::kActualOffset));
const int kUnderlyingOffset = SlicedString::kParentOffset;
- return String::cast(READ_FIELD(*this, kUnderlyingOffset));
+ return TaggedField<String, kUnderlyingOffset>::load(*this);
}
template <class Visitor>
@@ -527,49 +518,23 @@ int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
return SizeFor(length());
}
-String SlicedString::parent() {
- return String::cast(READ_FIELD(*this, kParentOffset));
-}
-
-void SlicedString::set_parent(Isolate* isolate, String parent,
- WriteBarrierMode mode) {
+void SlicedString::set_parent(String parent, WriteBarrierMode mode) {
DCHECK(parent.IsSeqString() || parent.IsExternalString());
- WRITE_FIELD(*this, kParentOffset, parent);
- CONDITIONAL_WRITE_BARRIER(*this, kParentOffset, parent, mode);
-}
-
-SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
-
-String ConsString::first() {
- return String::cast(READ_FIELD(*this, kFirstOffset));
+ TorqueGeneratedSlicedString<SlicedString, Super>::set_parent(parent, mode);
}
-Object ConsString::unchecked_first() { return READ_FIELD(*this, kFirstOffset); }
+TQ_SMI_ACCESSORS(SlicedString, offset)
-void ConsString::set_first(Isolate* isolate, String value,
- WriteBarrierMode mode) {
- WRITE_FIELD(*this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kFirstOffset, value, mode);
-}
-
-String ConsString::second() {
- return String::cast(READ_FIELD(*this, kSecondOffset));
+Object ConsString::unchecked_first() {
+ return TaggedField<Object, kFirstOffset>::load(*this);
}
Object ConsString::unchecked_second() {
return RELAXED_READ_FIELD(*this, kSecondOffset);
}
-void ConsString::set_second(Isolate* isolate, String value,
- WriteBarrierMode mode) {
- WRITE_FIELD(*this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kSecondOffset, value, mode);
-}
-
-ACCESSORS(ThinString, actual, String, kActualOffset)
-
-HeapObject ThinString::unchecked_actual() const {
- return HeapObject::unchecked_cast(READ_FIELD(*this, kActualOffset));
+DEF_GETTER(ThinString, unchecked_actual, HeapObject) {
+ return TaggedField<HeapObject, kActualOffset>::load(isolate, *this);
}
bool ExternalString::is_uncached() const {
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index cc513f88cb..d1981fd24d 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -61,8 +61,8 @@ Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
WriteToFlat(*cons, flat->GetChars(no_gc), 0, length);
result = flat;
}
- cons->set_first(isolate, *result);
- cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
+ cons->set_first(*result);
+ cons->set_second(ReadOnlyRoots(isolate).empty_string());
DCHECK(result->IsFlat());
return result;
}
@@ -146,15 +146,15 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
int size = this->Size(); // Byte size of the original string.
// Abort if size does not allow in-place conversion.
if (size < ExternalString::kUncachedSize) return false;
- Isolate* isolate;
// Read-only strings cannot be made external, since that would mutate the
// string.
- if (!GetIsolateFromWritableObject(*this, &isolate)) return false;
- Heap* heap = isolate->heap();
+ if (IsReadOnlyHeapObject(*this)) return false;
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(*this).IsIndirect();
+
if (has_pointers) {
- heap->NotifyObjectLayoutChange(*this, size, no_allocation);
+ isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -163,7 +163,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// the address of the backing store. When we encounter uncached external
// strings in generated code, we need to bailout to runtime.
Map new_map;
- ReadOnlyRoots roots(heap);
+ ReadOnlyRoots roots(isolate);
if (size < ExternalString::kSizeOfAllExternalStrings) {
if (is_internalized) {
new_map = roots.uncached_external_internalized_string_map();
@@ -177,10 +177,11 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Byte size of the external String object.
int new_size = this->SizeFromMap(new_map);
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
- ClearRecordedSlots::kNo);
+ isolate->heap()->CreateFillerObjectAt(
+ this->address() + new_size, size - new_size, ClearRecordedSlots::kNo);
if (has_pointers) {
- heap->ClearRecordedSlotRange(this->address(), this->address() + new_size);
+ isolate->heap()->ClearRecordedSlotRange(this->address(),
+ this->address() + new_size);
}
// We are storing the new map using release store after creating a filler for
@@ -189,7 +190,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
ExternalTwoByteString self = ExternalTwoByteString::cast(*this);
self.SetResource(isolate, resource);
- heap->RegisterExternalString(*this);
+ isolate->heap()->RegisterExternalString(*this);
if (is_internalized) self.Hash(); // Force regeneration of the hash value.
return true;
}
@@ -218,18 +219,16 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
int size = this->Size(); // Byte size of the original string.
// Abort if size does not allow in-place conversion.
if (size < ExternalString::kUncachedSize) return false;
- Isolate* isolate;
// Read-only strings cannot be made external, since that would mutate the
// string.
- if (!GetIsolateFromWritableObject(*this, &isolate)) return false;
- Heap* heap = isolate->heap();
+ if (IsReadOnlyHeapObject(*this)) return false;
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(*this).IsIndirect();
if (has_pointers) {
- heap->NotifyObjectLayoutChange(*this, size, no_allocation);
+ isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
}
-
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
// string occupies is too small for a regular external string. Instead, we
@@ -237,7 +236,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// the address of the backing store. When we encounter uncached external
// strings in generated code, we need to bailout to runtime.
Map new_map;
- ReadOnlyRoots roots(heap);
+ ReadOnlyRoots roots(isolate);
if (size < ExternalString::kSizeOfAllExternalStrings) {
new_map = is_internalized
? roots.uncached_external_one_byte_internalized_string_map()
@@ -250,10 +249,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// Byte size of the external String object.
int new_size = this->SizeFromMap(new_map);
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
- ClearRecordedSlots::kNo);
+ isolate->heap()->CreateFillerObjectAt(
+ this->address() + new_size, size - new_size, ClearRecordedSlots::kNo);
if (has_pointers) {
- heap->ClearRecordedSlotRange(this->address(), this->address() + new_size);
+ isolate->heap()->ClearRecordedSlotRange(this->address(),
+ this->address() + new_size);
}
// We are storing the new map using release store after creating a filler for
@@ -262,7 +262,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
ExternalOneByteString self = ExternalOneByteString::cast(*this);
self.SetResource(isolate, resource);
- heap->RegisterExternalString(*this);
+ isolate->heap()->RegisterExternalString(*this);
if (is_internalized) self.Hash(); // Force regeneration of the hash value.
return true;
}
@@ -272,9 +272,8 @@ bool String::SupportsExternalization() {
return i::ThinString::cast(*this).actual().SupportsExternalization();
}
- Isolate* isolate;
// RO_SPACE strings cannot be externalized.
- if (!GetIsolateFromWritableObject(*this, &isolate)) {
+ if (IsReadOnlyHeapObject(*this)) {
return false;
}
@@ -290,6 +289,7 @@ bool String::SupportsExternalization() {
DCHECK_LE(ExternalString::kUncachedSize, this->Size());
#endif
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
return !isolate->heap()->IsInGCPostProcessing();
}
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 74fc8fa763..1a826eee3b 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -79,7 +79,7 @@ class StringShape {
// ordered sequence of zero or more 16-bit unsigned integer values.
//
// All string values have a length field.
-class String : public Name {
+class String : public TorqueGeneratedString<String, Name> {
public:
enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
@@ -152,21 +152,18 @@ class String : public Name {
template <typename Char>
inline const Char* GetChars(const DisallowHeapAllocation& no_gc);
- // Get and set the length of the string.
- inline int length() const;
- inline void set_length(int value);
-
// Get and set the length of the string using acquire loads and release
// stores.
- inline int synchronized_length() const;
- inline void synchronized_set_length(int value);
+ DECL_SYNCHRONIZED_INT_ACCESSORS(length)
// Returns whether this string has only one-byte chars, i.e. all of them can
// be one-byte encoded. This might be the case even if the string is
// two-byte. Such strings may appear when the embedder prefers
// two-byte external representations even for one-byte data.
inline bool IsOneByteRepresentation() const;
+ inline bool IsOneByteRepresentation(Isolate* isolate) const;
inline bool IsTwoByteRepresentation() const;
+ inline bool IsTwoByteRepresentation(Isolate* isolate) const;
// Cons and slices have an encoding flag that may not represent the actual
// encoding of the underlying string. This is taken into account here.
@@ -320,8 +317,6 @@ class String : public Name {
static Handle<String> Trim(Isolate* isolate, Handle<String> string,
TrimMode mode);
- DECL_CAST(String)
-
V8_EXPORT_PRIVATE void PrintOn(FILE* out);
// For use during stack traces. Performs rudimentary sanity check.
@@ -338,9 +333,6 @@ class String : public Name {
inline bool IsFlat();
- DEFINE_FIELD_OFFSET_CONSTANTS(Name::kHeaderSize,
- TORQUE_GENERATED_STRING_FIELDS)
-
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
@@ -453,7 +445,7 @@ class String : public Name {
// Compute and set the hash code.
V8_EXPORT_PRIVATE uint32_t ComputeAndSetHash();
- OBJECT_CONSTRUCTORS(String, Name);
+ TQ_OBJECT_CONSTRUCTORS(String)
};
// clang-format off
@@ -477,30 +469,29 @@ class SubStringRange {
};
// The SeqString abstract class captures sequential string values.
-class SeqString : public String {
+class SeqString : public TorqueGeneratedSeqString<SeqString, String> {
public:
- DECL_CAST(SeqString)
-
// Truncate the string in-place if possible and return the result.
// In case of new_length == 0, the empty string is returned without
// truncating the original string.
V8_WARN_UNUSED_RESULT static Handle<String> Truncate(Handle<SeqString> string,
int new_length);
- OBJECT_CONSTRUCTORS(SeqString, String);
+ TQ_OBJECT_CONSTRUCTORS(SeqString)
};
-class InternalizedString : public String {
+class InternalizedString
+ : public TorqueGeneratedInternalizedString<InternalizedString, String> {
public:
- DECL_CAST(InternalizedString)
// TODO(neis): Possibly move some stuff from String here.
- OBJECT_CONSTRUCTORS(InternalizedString, String);
+ TQ_OBJECT_CONSTRUCTORS(InternalizedString)
};
// The OneByteString class captures sequential one-byte string objects.
// Each character in the OneByteString is a one-byte character.
-class SeqOneByteString : public SeqString {
+class SeqOneByteString
+ : public TorqueGeneratedSeqOneByteString<SeqOneByteString, SeqString> {
public:
static const bool kHasOneByteEncoding = true;
using Char = uint8_t;
@@ -518,8 +509,6 @@ class SeqOneByteString : public SeqString {
// is deterministic.
void clear_padding();
- DECL_CAST(SeqOneByteString)
-
// Garbage collection support. This method is called by the
// garbage collector to compute the actual size of an OneByteString
// instance.
@@ -537,12 +526,13 @@ class SeqOneByteString : public SeqString {
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(SeqOneByteString, SeqString);
+ TQ_OBJECT_CONSTRUCTORS(SeqOneByteString)
};
// The TwoByteString class captures sequential unicode string objects.
// Each character in the TwoByteString is a two-byte uint16_t.
-class SeqTwoByteString : public SeqString {
+class SeqTwoByteString
+ : public TorqueGeneratedSeqTwoByteString<SeqTwoByteString, SeqString> {
public:
static const bool kHasOneByteEncoding = false;
using Char = uint16_t;
@@ -560,8 +550,6 @@ class SeqTwoByteString : public SeqString {
// is deterministic.
void clear_padding();
- DECL_CAST(SeqTwoByteString)
-
// Garbage collection support. This method is called by the
// garbage collector to compute the actual size of a TwoByteString
// instance.
@@ -580,7 +568,7 @@ class SeqTwoByteString : public SeqString {
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(SeqTwoByteString, SeqString);
+ TQ_OBJECT_CONSTRUCTORS(SeqTwoByteString)
};
// The ConsString class describes string values built by using the
@@ -591,32 +579,19 @@ class SeqTwoByteString : public SeqString {
// are non-ConsString string values. The string value represented by
// a ConsString can be obtained by concatenating the leaf string
// values in a left-to-right depth-first traversal of the tree.
-class ConsString : public String {
+class ConsString : public TorqueGeneratedConsString<ConsString, String> {
public:
- // First string of the cons cell.
- inline String first();
// Doesn't check that the result is a string, even in debug mode. This is
// useful during GC where the mark bits confuse the checks.
inline Object unchecked_first();
- inline void set_first(Isolate* isolate, String first,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- // Second string of the cons cell.
- inline String second();
// Doesn't check that the result is a string, even in debug mode. This is
// useful during GC where the mark bits confuse the checks.
inline Object unchecked_second();
- inline void set_second(Isolate* isolate, String second,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Dispatched behavior.
V8_EXPORT_PRIVATE uint16_t Get(int index);
- DECL_CAST(ConsString)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize,
- TORQUE_GENERATED_CONS_STRING_FIELDS)
-
// Minimum length for a cons string.
static const int kMinLength = 13;
@@ -624,7 +599,7 @@ class ConsString : public String {
DECL_VERIFIER(ConsString)
- OBJECT_CONSTRUCTORS(ConsString, String);
+ TQ_OBJECT_CONSTRUCTORS(ConsString)
};
// The ThinString class describes string objects that are just references
@@ -634,25 +609,18 @@ class ConsString : public String {
// internalized version (which is allocated as a new object).
// In terms of memory layout and most algorithms operating on strings,
// ThinStrings can be thought of as "one-part cons strings".
-class ThinString : public String {
+class ThinString : public TorqueGeneratedThinString<ThinString, String> {
public:
- // Actual string that this ThinString refers to.
- inline String actual() const;
inline HeapObject unchecked_actual() const;
- inline void set_actual(String s,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline HeapObject unchecked_actual(Isolate* isolate) const;
V8_EXPORT_PRIVATE uint16_t Get(int index);
- DECL_CAST(ThinString)
DECL_VERIFIER(ThinString)
- DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize,
- TORQUE_GENERATED_THIN_STRING_FIELDS)
-
using BodyDescriptor = FixedBodyDescriptor<kActualOffset, kSize, kSize>;
- OBJECT_CONSTRUCTORS(ThinString, String);
+ TQ_OBJECT_CONSTRUCTORS(ThinString)
};
// The Sliced String class describes strings that are substrings of another
@@ -667,22 +635,14 @@ class ThinString : public String {
// - handling externalized parent strings
// - external strings as parent
// - truncating sliced string to enable otherwise unneeded parent to be GC'ed.
-class SlicedString : public String {
+class SlicedString : public TorqueGeneratedSlicedString<SlicedString, String> {
public:
- inline String parent();
- inline void set_parent(Isolate* isolate, String parent,
+ inline void set_parent(String parent,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline int offset() const;
- inline void set_offset(int offset);
-
+ DECL_INT_ACCESSORS(offset)
// Dispatched behavior.
V8_EXPORT_PRIVATE uint16_t Get(int index);
- DECL_CAST(SlicedString)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize,
- TORQUE_GENERATED_SLICED_STRING_FIELDS)
-
// Minimum length for a sliced string.
static const int kMinLength = 13;
@@ -690,7 +650,7 @@ class SlicedString : public String {
DECL_VERIFIER(SlicedString)
- OBJECT_CONSTRUCTORS(SlicedString, String);
+ TQ_OBJECT_CONSTRUCTORS(SlicedString)
};
// The ExternalString class describes string values that are backed by
@@ -705,6 +665,7 @@ class SlicedString : public String {
class ExternalString : public String {
public:
DECL_CAST(ExternalString)
+ DECL_VERIFIER(ExternalString)
DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize,
TORQUE_GENERATED_EXTERNAL_STRING_FIELDS)
diff --git a/deps/v8/src/objects/synthetic-module.cc b/deps/v8/src/objects/synthetic-module.cc
new file mode 100644
index 0000000000..0cca30a37b
--- /dev/null
+++ b/deps/v8/src/objects/synthetic-module.cc
@@ -0,0 +1,108 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/synthetic-module.h"
+
+#include "src/api/api-inl.h"
+#include "src/builtins/accessors.h"
+#include "src/objects/js-generator-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/shared-function-info.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+// Implements SetSyntheticModuleBinding:
+// https://heycam.github.io/webidl/#setsyntheticmoduleexport
+void SyntheticModule::SetExport(Isolate* isolate,
+ Handle<SyntheticModule> module,
+ Handle<String> export_name,
+ Handle<Object> export_value) {
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ Handle<Object> export_object(exports->Lookup(export_name), isolate);
+ CHECK(export_object->IsCell());
+ Handle<Cell> export_cell(Handle<Cell>::cast(export_object));
+ // Spec step 2: Set the mutable binding of export_name to export_value
+ export_cell->set_value(*export_value);
+}
+
+// Implements Synthetic Module Record's ResolveExport concrete method:
+// https://heycam.github.io/webidl/#smr-resolveexport
+MaybeHandle<Cell> SyntheticModule::ResolveExport(
+ Isolate* isolate, Handle<SyntheticModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve) {
+ Handle<Object> object(module->exports().Lookup(export_name), isolate);
+ if (object->IsCell()) {
+ return Handle<Cell>::cast(object);
+ }
+
+ if (must_resolve) {
+ return isolate->Throw<Cell>(
+ isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
+ module_specifier, export_name),
+ &loc);
+ }
+
+ return MaybeHandle<Cell>();
+}
+
+// Implements Synthetic Module Record's Instantiate concrete method:
+// https://heycam.github.io/webidl/#smr-instantiate
+bool SyntheticModule::PrepareInstantiate(Isolate* isolate,
+ Handle<SyntheticModule> module,
+ v8::Local<v8::Context> context,
+ v8::Module::ResolveCallback callback) {
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ Handle<FixedArray> export_names(module->export_names(), isolate);
+ // Spec step 7: For each export_name in module->export_names...
+ for (int i = 0, n = export_names->length(); i < n; ++i) {
+ // Spec step 7.1: Create a new mutable binding for export_name.
+ // Spec step 7.2: Initialize the new mutable binding to undefined.
+ Handle<Cell> cell =
+ isolate->factory()->NewCell(isolate->factory()->undefined_value());
+ Handle<String> name(String::cast(export_names->get(i)), isolate);
+ CHECK(exports->Lookup(name).IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, name, cell);
+ }
+ module->set_exports(*exports);
+ return true;
+}
+
+// Second step of module instantiation. No real work to do for SyntheticModule
+// as there are no imports or indirect exports to resolve;
+// just update status.
+bool SyntheticModule::FinishInstantiate(Isolate* isolate,
+ Handle<SyntheticModule> module) {
+ module->SetStatus(kInstantiated);
+ return true;
+}
+
+// Implements Synthetic Module Record's Evaluate concrete method:
+// https://heycam.github.io/webidl/#smr-evaluate
+MaybeHandle<Object> SyntheticModule::Evaluate(Isolate* isolate,
+ Handle<SyntheticModule> module) {
+ module->SetStatus(kEvaluating);
+
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps =
+ FUNCTION_CAST<v8::Module::SyntheticModuleEvaluationSteps>(
+ module->evaluation_steps().foreign_address());
+ v8::Local<v8::Value> result;
+ if (!evaluation_steps(
+ Utils::ToLocal(Handle<Context>::cast(isolate->native_context())),
+ Utils::ToLocal(Handle<Module>::cast(module)))
+ .ToLocal(&result)) {
+ isolate->PromoteScheduledException();
+ module->RecordError(isolate);
+ return MaybeHandle<Object>();
+ }
+
+ module->SetStatus(kEvaluated);
+ return Utils::OpenHandle(*result);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/synthetic-module.h b/deps/v8/src/objects/synthetic-module.h
new file mode 100644
index 0000000000..9f91f2ce4a
--- /dev/null
+++ b/deps/v8/src/objects/synthetic-module.h
@@ -0,0 +1,69 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SYNTHETIC_MODULE_H_
+#define V8_OBJECTS_SYNTHETIC_MODULE_H_
+
+#include "src/objects/module.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// The runtime representation of a Synthetic Module Record, a module that can be
+// instantiated by an embedder with embedder-defined exports and evaluation
+// steps.
+// https://heycam.github.io/webidl/#synthetic-module-records
+class SyntheticModule : public Module {
+ public:
+ NEVER_READ_ONLY_SPACE
+ DECL_CAST(SyntheticModule)
+ DECL_VERIFIER(SyntheticModule)
+ DECL_PRINTER(SyntheticModule)
+
+ // The list of all names exported by this module
+ DECL_ACCESSORS(name, String)
+ DECL_ACCESSORS(export_names, FixedArray)
+ DECL_ACCESSORS(evaluation_steps, Foreign)
+
+ static void SetExport(Isolate* isolate, Handle<SyntheticModule> module,
+ Handle<String> export_name,
+ Handle<Object> export_value);
+
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(Module::kHeaderSize,
+ TORQUE_GENERATED_SYNTHETIC_MODULE_FIELDS)
+
+ using BodyDescriptor = SubclassBodyDescriptor<
+ Module::BodyDescriptor,
+ FixedBodyDescriptor<kExportNamesOffset, kSize, kSize>>;
+
+ private:
+ friend class Module;
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExport(
+ Isolate* isolate, Handle<SyntheticModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve);
+
+ static V8_WARN_UNUSED_RESULT bool PrepareInstantiate(
+ Isolate* isolate, Handle<SyntheticModule> module,
+ v8::Local<v8::Context> context, v8::Module::ResolveCallback callback);
+ static V8_WARN_UNUSED_RESULT bool FinishInstantiate(
+ Isolate* isolate, Handle<SyntheticModule> module);
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
+ Isolate* isolate, Handle<SyntheticModule> module);
+
+ OBJECT_CONSTRUCTORS(SyntheticModule, Module);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SYNTHETIC_MODULE_H_
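For a rough sense of how the export machinery above is driven: PrepareInstantiate creates one undefined-initialized Cell per name in export_names, and SetExport later overwrites that cell's value. A minimal sketch, where module and isolate are assumed to be in scope and "answer" is a hypothetical export name that must already appear in export_names:

    Handle<String> name =
        isolate->factory()->NewStringFromAsciiChecked("answer");
    Handle<Object> value(Smi::FromInt(42), isolate);
    // SetExport CHECKs internally that "answer" already has a Cell in
    // module->exports().
    SyntheticModule::SetExport(isolate, module, name, value);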
diff --git a/deps/v8/src/objects/tagged-field-inl.h b/deps/v8/src/objects/tagged-field-inl.h
new file mode 100644
index 0000000000..3cce536a14
--- /dev/null
+++ b/deps/v8/src/objects/tagged-field-inl.h
@@ -0,0 +1,162 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_FIELD_INL_H_
+#define V8_OBJECTS_TAGGED_FIELD_INL_H_
+
+#include "src/objects/tagged-field.h"
+
+#include "src/common/ptr-compr-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+template <typename T, int kFieldOffset>
+Address TaggedField<T, kFieldOffset>::address(HeapObject host, int offset) {
+ return host.address() + kFieldOffset + offset;
+}
+
+// static
+template <typename T, int kFieldOffset>
+Tagged_t* TaggedField<T, kFieldOffset>::location(HeapObject host, int offset) {
+ return reinterpret_cast<Tagged_t*>(address(host, offset));
+}
+
+// static
+template <typename T, int kFieldOffset>
+template <typename TOnHeapAddress>
+Address TaggedField<T, kFieldOffset>::tagged_to_full(
+ TOnHeapAddress on_heap_addr, Tagged_t tagged_value) {
+#ifdef V8_COMPRESS_POINTERS
+ if (kIsSmi) {
+ return DecompressTaggedSigned(tagged_value);
+ } else if (kIsHeapObject) {
+ return DecompressTaggedPointer(on_heap_addr, tagged_value);
+ } else {
+ return DecompressTaggedAny(on_heap_addr, tagged_value);
+ }
+#else
+ return tagged_value;
+#endif
+}
+
+// static
+template <typename T, int kFieldOffset>
+Tagged_t TaggedField<T, kFieldOffset>::full_to_tagged(Address value) {
+#ifdef V8_COMPRESS_POINTERS
+ return CompressTagged(value);
+#else
+ return value;
+#endif
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::load(HeapObject host, int offset) {
+ Tagged_t value = *location(host, offset);
+ return T(tagged_to_full(host.ptr(), value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::load(Isolate* isolate, HeapObject host,
+ int offset) {
+ Tagged_t value = *location(host, offset);
+ return T(tagged_to_full(isolate, value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
+#ifdef V8_CONCURRENT_MARKING
+ Relaxed_Store(host, value);
+#else
+ *location(host) = full_to_tagged(value.ptr());
+#endif
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::store(HeapObject host, int offset, T value) {
+#ifdef V8_CONCURRENT_MARKING
+ Relaxed_Store(host, offset, value);
+#else
+ *location(host, offset) = full_to_tagged(value.ptr());
+#endif
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::Relaxed_Load(HeapObject host, int offset) {
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
+ return T(tagged_to_full(host.ptr(), value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::Relaxed_Load(Isolate* isolate, HeapObject host,
+ int offset) {
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
+ return T(tagged_to_full(isolate, value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, T value) {
+ AsAtomicTagged::Relaxed_Store(location(host), full_to_tagged(value.ptr()));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, int offset,
+ T value) {
+ AsAtomicTagged::Relaxed_Store(location(host, offset),
+ full_to_tagged(value.ptr()));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::Acquire_Load(HeapObject host, int offset) {
+ AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
+ return T(tagged_to_full(host.ptr(), value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::Acquire_Load(Isolate* isolate, HeapObject host,
+ int offset) {
+ AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
+ return T(tagged_to_full(isolate, value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Release_Store(HeapObject host, T value) {
+ AsAtomicTagged::Release_Store(location(host), full_to_tagged(value.ptr()));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Release_Store(HeapObject host, int offset,
+ T value) {
+ AsAtomicTagged::Release_Store(location(host, offset),
+ full_to_tagged(value.ptr()));
+}
+
+// static
+template <typename T, int kFieldOffset>
+Tagged_t TaggedField<T, kFieldOffset>::Release_CompareAndSwap(HeapObject host,
+ T old, T value) {
+ Tagged_t old_value = full_to_tagged(old.ptr());
+ Tagged_t new_value = full_to_tagged(value.ptr());
+ Tagged_t result = AsAtomicTagged::Release_CompareAndSwap(
+ location(host), old_value, new_value);
+ return result;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_FIELD_INL_H_
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
new file mode 100644
index 0000000000..fbaaee5930
--- /dev/null
+++ b/deps/v8/src/objects/tagged-field.h
@@ -0,0 +1,76 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_FIELD_H_
+#define V8_OBJECTS_TAGGED_FIELD_H_
+
+#include "src/common/globals.h"
+
+#include "src/objects/objects.h"
+#include "src/objects/tagged-value.h"
+
+namespace v8 {
+namespace internal {
+
+// This helper static class represents a tagged field of type T at offset
+// kFieldOffset inside some host HeapObject.
+// In full-pointer mode this type adds no overhead, but when pointer
+// compression is enabled this class allows us to use the proper decompression
+// function depending on the field type.
+template <typename T, int kFieldOffset = 0>
+class TaggedField : public AllStatic {
+ public:
+ static_assert(std::is_base_of<Object, T>::value ||
+ std::is_same<MapWord, T>::value ||
+ std::is_same<MaybeObject, T>::value,
+ "T must be strong or weak tagged type or MapWord");
+
+ // True for Smi fields.
+ static constexpr bool kIsSmi = std::is_base_of<Smi, T>::value;
+
+ // True for HeapObject and MapWord fields. The latter may look like a Smi
+ // if it contains a forwarding pointer but still requires tagged pointer
+ // decompression.
+ static constexpr bool kIsHeapObject =
+ std::is_base_of<HeapObject, T>::value || std::is_same<MapWord, T>::value;
+
+ static inline Address address(HeapObject host, int offset = 0);
+
+ static inline T load(HeapObject host, int offset = 0);
+ static inline T load(Isolate* isolate, HeapObject host, int offset = 0);
+
+ static inline void store(HeapObject host, T value);
+ static inline void store(HeapObject host, int offset, T value);
+
+ static inline T Relaxed_Load(HeapObject host, int offset = 0);
+ static inline T Relaxed_Load(Isolate* isolate, HeapObject host,
+ int offset = 0);
+
+ static inline void Relaxed_Store(HeapObject host, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
+
+ static inline T Acquire_Load(HeapObject host, int offset = 0);
+ static inline T Acquire_Load(Isolate* isolate, HeapObject host,
+ int offset = 0);
+
+ static inline void Release_Store(HeapObject host, T value);
+ static inline void Release_Store(HeapObject host, int offset, T value);
+
+ static inline Tagged_t Release_CompareAndSwap(HeapObject host, T old,
+ T value);
+
+ private:
+ static inline Tagged_t* location(HeapObject host, int offset = 0);
+
+ template <typename TOnHeapAddress>
+ static inline Address tagged_to_full(TOnHeapAddress on_heap_addr,
+ Tagged_t tagged_value);
+
+ static inline Tagged_t full_to_tagged(Address value);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_FIELD_H_
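As a rough illustration of the pattern this helper enables (kFooOffset and the free functions below are hypothetical; the real call sites are the TaggedField<...>::load() conversions in string-inl.h above):

    // Plain load: decompresses against the host object's own address.
    String ReadFoo(HeapObject host) {
      return TaggedField<String, kFooOffset>::load(host);
    }
    // Isolate-based load: decompresses against the isolate root instead.
    String ReadFoo(Isolate* isolate, HeapObject host) {
      return TaggedField<String, kFooOffset>::load(isolate, host);
    }
    // Store: becomes a relaxed atomic store under V8_CONCURRENT_MARKING.
    void WriteFoo(HeapObject host, String value) {
      TaggedField<String, kFooOffset>::store(host, value);
    }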
diff --git a/deps/v8/src/objects/tagged-impl-inl.h b/deps/v8/src/objects/tagged-impl-inl.h
index f735a241a8..909f65a959 100644
--- a/deps/v8/src/objects/tagged-impl-inl.h
+++ b/deps/v8/src/objects/tagged-impl-inl.h
@@ -52,11 +52,11 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
template <HeapObjectReferenceType kRefType, typename StorageType>
bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
- ROOT_PARAM, HeapObject* result) const {
+ Isolate* isolate, HeapObject* result) const {
if (kIsFull) return GetHeapObject(result);
// Implementation for compressed pointers.
if (!IsStrongOrWeak()) return false;
- *result = GetHeapObject(ROOT_VALUE);
+ *result = GetHeapObject(isolate);
return true;
}
@@ -79,14 +79,14 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
template <HeapObjectReferenceType kRefType, typename StorageType>
bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
- ROOT_PARAM, HeapObject* result,
+ Isolate* isolate, HeapObject* result,
HeapObjectReferenceType* reference_type) const {
if (kIsFull) return GetHeapObject(result, reference_type);
// Implementation for compressed pointers.
if (!IsStrongOrWeak()) return false;
*reference_type = IsWeakOrCleared() ? HeapObjectReferenceType::WEAK
: HeapObjectReferenceType::STRONG;
- *result = GetHeapObject(ROOT_VALUE);
+ *result = GetHeapObject(isolate);
return true;
}
@@ -107,12 +107,12 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
template <HeapObjectReferenceType kRefType, typename StorageType>
bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
- ROOT_PARAM, HeapObject* result) const {
+ Isolate* isolate, HeapObject* result) const {
if (kIsFull) return GetHeapObjectIfStrong(result);
// Implementation for compressed pointers.
if (IsStrong()) {
- *result =
- HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+ *result = HeapObject::cast(
+ Object(DecompressTaggedPointer(isolate, static_cast<Tagged_t>(ptr_))));
return true;
}
return false;
@@ -132,11 +132,12 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong()
template <HeapObjectReferenceType kRefType, typename StorageType>
HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong(
- ROOT_PARAM) const {
+ Isolate* isolate) const {
if (kIsFull) return GetHeapObjectAssumeStrong();
// Implementation for compressed pointers.
DCHECK(IsStrong());
- return HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+ return HeapObject::cast(
+ Object(DecompressTaggedPointer(isolate, static_cast<Tagged_t>(ptr_))));
}
//
@@ -161,12 +162,12 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfWeak(
template <HeapObjectReferenceType kRefType, typename StorageType>
bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfWeak(
- ROOT_PARAM, HeapObject* result) const {
+ Isolate* isolate, HeapObject* result) const {
if (kIsFull) return GetHeapObjectIfWeak(result);
// Implementation for compressed pointers.
if (kCanBeWeak) {
if (IsWeak()) {
- *result = GetHeapObject(ROOT_VALUE);
+ *result = GetHeapObject(isolate);
return true;
}
return false;
@@ -189,11 +190,11 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeWeak() const {
template <HeapObjectReferenceType kRefType, typename StorageType>
HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeWeak(
- ROOT_PARAM) const {
+ Isolate* isolate) const {
if (kIsFull) return GetHeapObjectAssumeWeak();
// Implementation for compressed pointers.
DCHECK(IsWeak());
- return GetHeapObject(ROOT_VALUE);
+ return GetHeapObject(isolate);
}
//
@@ -214,17 +215,19 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject() const {
}
template <HeapObjectReferenceType kRefType, typename StorageType>
-HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject(ROOT_PARAM) const {
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject(
+ Isolate* isolate) const {
if (kIsFull) return GetHeapObject();
// Implementation for compressed pointers.
DCHECK(!IsSmi());
if (kCanBeWeak) {
DCHECK(!IsCleared());
- return HeapObject::cast(Object(
- DecompressTaggedPointer(ROOT_VALUE, ptr_ & ~kWeakHeapObjectMask)));
+ return HeapObject::cast(Object(DecompressTaggedPointer(
+ isolate, static_cast<Tagged_t>(ptr_) & ~kWeakHeapObjectMask)));
} else {
DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
- return HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+ return HeapObject::cast(
+ Object(DecompressTaggedPointer(isolate, static_cast<Tagged_t>(ptr_))));
}
}
@@ -242,13 +245,14 @@ Object TaggedImpl<kRefType, StorageType>::GetHeapObjectOrSmi() const {
}
template <HeapObjectReferenceType kRefType, typename StorageType>
-Object TaggedImpl<kRefType, StorageType>::GetHeapObjectOrSmi(ROOT_PARAM) const {
+Object TaggedImpl<kRefType, StorageType>::GetHeapObjectOrSmi(
+ Isolate* isolate) const {
if (kIsFull) return GetHeapObjectOrSmi();
// Implementation for compressed pointers.
if (IsSmi()) {
- return Object(DecompressTaggedSigned(ptr_));
+ return Object(DecompressTaggedSigned(static_cast<Tagged_t>(ptr_)));
}
- return GetHeapObject(ROOT_VALUE);
+ return GetHeapObject(isolate);
}
} // namespace internal
diff --git a/deps/v8/src/objects/tagged-impl.h b/deps/v8/src/objects/tagged-impl.h
index e3d982565f..111eabae2f 100644
--- a/deps/v8/src/objects/tagged-impl.h
+++ b/deps/v8/src/objects/tagged-impl.h
@@ -40,16 +40,24 @@ class TaggedImpl {
// Make clang on Linux catch what MSVC complains about on Windows:
operator bool() const = delete;
- constexpr bool operator==(TaggedImpl other) const {
- return ptr_ == other.ptr_;
+ template <typename U>
+ constexpr bool operator==(TaggedImpl<kRefType, U> other) const {
+ static_assert(
+ std::is_same<U, Address>::value || std::is_same<U, Tagged_t>::value,
+ "U must be either Address or Tagged_t");
+ return static_cast<Tagged_t>(ptr_) == static_cast<Tagged_t>(other.ptr());
}
- constexpr bool operator!=(TaggedImpl other) const {
- return ptr_ != other.ptr_;
+ template <typename U>
+ constexpr bool operator!=(TaggedImpl<kRefType, U> other) const {
+ static_assert(
+ std::is_same<U, Address>::value || std::is_same<U, Tagged_t>::value,
+ "U must be either Address or Tagged_t");
+ return static_cast<Tagged_t>(ptr_) != static_cast<Tagged_t>(other.ptr());
}
// For using in std::set and std::map.
constexpr bool operator<(TaggedImpl other) const {
- return ptr_ < other.ptr();
+ return static_cast<Tagged_t>(ptr_) < static_cast<Tagged_t>(other.ptr());
}
constexpr StorageType ptr() const { return ptr_; }
@@ -99,50 +107,51 @@ class TaggedImpl {
//
// The following set of methods get HeapObject out of the tagged value
- // which may involve decompression in which case the ROOT_PARAM is required.
+ // which may involve decompression in which case the isolate root is required.
// If the pointer compression is not enabled then the variants with
- // ROOT_PARAM will be exactly the same as non-ROOT_PARAM ones.
+ // an isolate parameter will be exactly the same as the ones without an
+ // isolate parameter.
//
// If this tagged value is a strong pointer to a HeapObject, returns true and
// sets *result. Otherwise returns false.
inline bool GetHeapObjectIfStrong(HeapObject* result) const;
- inline bool GetHeapObjectIfStrong(ROOT_PARAM, HeapObject* result) const;
+ inline bool GetHeapObjectIfStrong(Isolate* isolate, HeapObject* result) const;
// DCHECKs that this tagged value is a strong pointer to a HeapObject and
// returns the HeapObject.
inline HeapObject GetHeapObjectAssumeStrong() const;
- inline HeapObject GetHeapObjectAssumeStrong(ROOT_PARAM) const;
+ inline HeapObject GetHeapObjectAssumeStrong(Isolate* isolate) const;
// If this tagged value is a weak pointer to a HeapObject, returns true and
// sets *result. Otherwise returns false.
inline bool GetHeapObjectIfWeak(HeapObject* result) const;
- inline bool GetHeapObjectIfWeak(ROOT_PARAM, HeapObject* result) const;
+ inline bool GetHeapObjectIfWeak(Isolate* isolate, HeapObject* result) const;
// DCHECKs that this tagged value is a weak pointer to a HeapObject and
// returns the HeapObject.
inline HeapObject GetHeapObjectAssumeWeak() const;
- inline HeapObject GetHeapObjectAssumeWeak(ROOT_PARAM) const;
+ inline HeapObject GetHeapObjectAssumeWeak(Isolate* isolate) const;
// If this tagged value is a strong or weak pointer to a HeapObject, returns
// true and sets *result. Otherwise returns false.
inline bool GetHeapObject(HeapObject* result) const;
- inline bool GetHeapObject(ROOT_PARAM, HeapObject* result) const;
+ inline bool GetHeapObject(Isolate* isolate, HeapObject* result) const;
inline bool GetHeapObject(HeapObject* result,
HeapObjectReferenceType* reference_type) const;
- inline bool GetHeapObject(ROOT_PARAM, HeapObject* result,
+ inline bool GetHeapObject(Isolate* isolate, HeapObject* result,
HeapObjectReferenceType* reference_type) const;
// DCHECKs that this tagged value is a strong or a weak pointer to a
// HeapObject and returns the HeapObject.
inline HeapObject GetHeapObject() const;
- inline HeapObject GetHeapObject(ROOT_PARAM) const;
+ inline HeapObject GetHeapObject(Isolate* isolate) const;
// DCHECKs that this tagged value is a strong or a weak pointer to a
// HeapObject or a Smi and returns the HeapObject or Smi.
inline Object GetHeapObjectOrSmi() const;
- inline Object GetHeapObjectOrSmi(ROOT_PARAM) const;
+ inline Object GetHeapObjectOrSmi(Isolate* isolate) const;
// Cast operation is available only for full non-weak tagged values.
template <typename T>
diff --git a/deps/v8/src/objects/tagged-value-inl.h b/deps/v8/src/objects/tagged-value-inl.h
index 5eb0e20947..f409a4006b 100644
--- a/deps/v8/src/objects/tagged-value-inl.h
+++ b/deps/v8/src/objects/tagged-value-inl.h
@@ -9,7 +9,8 @@
#include "include/v8-internal.h"
#include "src/common/ptr-compr-inl.h"
-#include "src/objects/heap-object-inl.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/objects.h"
#include "src/objects/oddball.h"
#include "src/objects/tagged-impl-inl.h"
#include "src/roots/roots-inl.h"
@@ -17,17 +18,37 @@
namespace v8 {
namespace internal {
-Object StrongTaggedValue::ToObject(WITH_ROOT_PARAM(StrongTaggedValue object)) {
+inline StrongTaggedValue::StrongTaggedValue(Object o)
+ :
#ifdef V8_COMPRESS_POINTERS
- return Object(DecompressTaggedAny(ROOT_VALUE, object.ptr()));
+ TaggedImpl(CompressTagged(o.ptr()))
+#else
+ TaggedImpl(o.ptr())
+#endif
+{
+}
+
+Object StrongTaggedValue::ToObject(Isolate* isolate, StrongTaggedValue object) {
+#ifdef V8_COMPRESS_POINTERS
+ return Object(DecompressTaggedAny(isolate, object.ptr()));
#else
return Object(object.ptr());
#endif
}
-MaybeObject TaggedValue::ToMaybeObject(WITH_ROOT_PARAM(TaggedValue object)) {
+inline TaggedValue::TaggedValue(MaybeObject o)
+ :
+#ifdef V8_COMPRESS_POINTERS
+ TaggedImpl(CompressTagged(o.ptr()))
+#else
+ TaggedImpl(o.ptr())
+#endif
+{
+}
+
+MaybeObject TaggedValue::ToMaybeObject(Isolate* isolate, TaggedValue object) {
#ifdef V8_COMPRESS_POINTERS
- return MaybeObject(DecompressTaggedAny(ROOT_VALUE, object.ptr()));
+ return MaybeObject(DecompressTaggedAny(isolate, object.ptr()));
#else
return MaybeObject(object.ptr());
#endif
diff --git a/deps/v8/src/objects/tagged-value.h b/deps/v8/src/objects/tagged-value.h
index bb7609f7c3..7b6192204a 100644
--- a/deps/v8/src/objects/tagged-value.h
+++ b/deps/v8/src/objects/tagged-value.h
@@ -21,8 +21,9 @@ class StrongTaggedValue
public:
constexpr StrongTaggedValue() : TaggedImpl() {}
explicit constexpr StrongTaggedValue(Tagged_t ptr) : TaggedImpl(ptr) {}
+ explicit StrongTaggedValue(Object o);
- inline static Object ToObject(WITH_ROOT_PARAM(StrongTaggedValue object));
+ inline static Object ToObject(Isolate* isolate, StrongTaggedValue object);
};
// Almost same as MaybeObject but this one deals with in-heap and potentially
@@ -32,8 +33,9 @@ class TaggedValue : public TaggedImpl<HeapObjectReferenceType::WEAK, Tagged_t> {
public:
constexpr TaggedValue() : TaggedImpl() {}
explicit constexpr TaggedValue(Tagged_t ptr) : TaggedImpl(ptr) {}
+ explicit TaggedValue(MaybeObject o);
- inline static MaybeObject ToMaybeObject(WITH_ROOT_PARAM(TaggedValue object));
+ inline static MaybeObject ToMaybeObject(Isolate* isolate, TaggedValue object);
};
} // namespace internal
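A rough round-trip sketch for the new constructor and conversion helper above, where obj is an in-heap Object and isolate is its Isolate (both assumed to be in scope); with pointer compression enabled the constructor compresses and ToObject() decompresses against the isolate root, and without it both directions are identity conversions:

    StrongTaggedValue compressed(obj);
    Object restored = StrongTaggedValue::ToObject(isolate, compressed);
    DCHECK_EQ(obj, restored);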
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index 2f34a48a2a..d5b6293afe 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -16,11 +16,9 @@ namespace internal {
// static
Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
- Isolate* isolate, Handle<Context> native_context,
+ Isolate* isolate, Handle<NativeContext> native_context,
Handle<TemplateObjectDescription> description,
Handle<SharedFunctionInfo> shared_info, int slot_id) {
- DCHECK(native_context->IsNativeContext());
-
// Check the template weakmap to see if the template object already exists.
Handle<EphemeronHashTable> template_weakmap =
native_context->template_weakmap().IsUndefined(isolate)
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index 220f9dab1e..20ad742338 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -47,7 +47,7 @@ class TemplateObjectDescription final : public Struct {
DECL_CAST(TemplateObjectDescription)
static Handle<JSArray> GetTemplateObject(
- Isolate* isolate, Handle<Context> native_context,
+ Isolate* isolate, Handle<NativeContext> native_context,
Handle<TemplateObjectDescription> description,
Handle<SharedFunctionInfo> shared_info, int slot_id);
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index a1a098ffc0..d344174a0c 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -55,7 +55,7 @@ SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
// static
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
- HeapObject extra = function_template_info->rare_data();
+ HeapObject extra = function_template_info->rare_data(isolate);
if (extra.IsUndefined(isolate)) {
return AllocateFunctionTemplateRareData(isolate, function_template_info);
} else {
@@ -64,9 +64,9 @@ FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
}
#define RARE_ACCESSORS(Name, CamelName, Type) \
- Type FunctionTemplateInfo::Get##CamelName() { \
- HeapObject extra = rare_data(); \
- HeapObject undefined = GetReadOnlyRoots().undefined_value(); \
+ DEF_GETTER(FunctionTemplateInfo, Get##CamelName, Type) { \
+ HeapObject extra = rare_data(isolate); \
+ HeapObject undefined = GetReadOnlyRoots(isolate).undefined_value(); \
return extra == undefined ? undefined \
: FunctionTemplateRareData::cast(extra).Name(); \
} \
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index 66cd038114..99142266ed 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -86,7 +86,7 @@ class FunctionTemplateInfo : public TemplateInfo {
DECL_ACCESSORS(rare_data, HeapObject)
#define DECL_RARE_ACCESSORS(Name, CamelName, Type) \
- inline Type Get##CamelName(); \
+ DECL_GETTER(Get##CamelName, Type) \
static inline void Set##CamelName( \
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info, \
Handle<Type> Name);
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index 893de78dc4..048774f49b 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -102,9 +102,8 @@ PropertyDetails TransitionsAccessor::GetTargetDetails(Name name, Map target) {
return descriptors.GetDetails(descriptor);
}
-// static
PropertyDetails TransitionsAccessor::GetSimpleTargetDetails(Map transition) {
- return transition.GetLastDescriptorDetails();
+ return transition.GetLastDescriptorDetails(isolate_);
}
// static
@@ -195,13 +194,13 @@ void TransitionsAccessor::Reload() {
}
void TransitionsAccessor::Initialize() {
- raw_transitions_ = map_.raw_transitions();
+ raw_transitions_ = map_.raw_transitions(isolate_);
HeapObject heap_object;
if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
encoding_ = kUninitialized;
} else if (raw_transitions_->IsWeak()) {
encoding_ = kWeakRef;
- } else if (raw_transitions_->GetHeapObjectIfStrong(&heap_object)) {
+ } else if (raw_transitions_->GetHeapObjectIfStrong(isolate_, &heap_object)) {
if (heap_object.IsTransitionArray()) {
encoding_ = kFullTransitionArray;
} else if (heap_object.IsPrototypeInfo()) {
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index a2cd102aaf..843b790b7d 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -226,7 +226,7 @@ MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
PropertyAttributes attributes = name->IsPrivate() ? DONT_ENUM : NONE;
Map target = SearchTransition(*name, kData, attributes);
if (target.is_null()) return MaybeHandle<Map>();
- PropertyDetails details = target.GetLastDescriptorDetails();
+ PropertyDetails details = target.GetLastDescriptorDetails(isolate_);
DCHECK_EQ(attributes, details.attributes());
DCHECK_EQ(kData, details.kind());
if (requested_location == kFieldOnly && details.location() != kField) {
diff --git a/deps/v8/src/objects/transitions.h b/deps/v8/src/objects/transitions.h
index b4dadcc22a..f21e8cd54e 100644
--- a/deps/v8/src/objects/transitions.h
+++ b/deps/v8/src/objects/transitions.h
@@ -147,7 +147,7 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
friend class MarkCompactCollector; // For HasSimpleTransitionTo.
friend class TransitionArray;
- static inline PropertyDetails GetSimpleTargetDetails(Map transition);
+ inline PropertyDetails GetSimpleTargetDetails(Map transition);
static inline Name GetSimpleTransitionKey(Map transition);
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 331a12b157..5a72dd6532 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -22,6 +22,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
#include "src/snapshot/code-serializer.h"
@@ -65,9 +66,6 @@ static size_t BytesNeededForVarint(T value) {
return result;
}
-// Note that some additional tag values are defined in Blink's
-// Source/bindings/core/v8/serialization/SerializationTag.h, which must
-// not clash with values defined here.
enum class SerializationTag : uint8_t {
// version:uint32_t (if at beginning of data, sets version > 0)
kVersion = 0xFF,
@@ -161,6 +159,40 @@ enum class SerializationTag : uint8_t {
// A transferred WebAssembly.Memory object. maximumPages:int32_t, then by
// SharedArrayBuffer tag and its data.
kWasmMemoryTransfer = 'm',
+ // A list of (subtag: ErrorTag, [subtag dependent data]). See ErrorTag for
+ // details.
+ kError = 'r',
+
+ // The following tags are reserved because they were in use in Chromium before
+ // the kHostObject tag was introduced in format version 13, at
+ // v8 refs/heads/master@{#43466}
+ // chromium/src refs/heads/master@{#453568}
+ //
+ // They must not be reused without a version check to prevent old values from
+ // starting to deserialize incorrectly. For simplicity, it's recommended to
+ // avoid them altogether.
+ //
+ // This is the set of tags that existed in SerializationTag.h at that time and
+ // still exist at the time of this writing (i.e., excluding those that were
+ // removed on the Chromium side because there should be no real user data
+ // containing them).
+ //
+ // It might be possible to also free up other tags which were never persisted
+ // (e.g. because they were used only for transfer) in the future.
+ kLegacyReservedMessagePort = 'M',
+ kLegacyReservedBlob = 'b',
+ kLegacyReservedBlobIndex = 'i',
+ kLegacyReservedFile = 'f',
+ kLegacyReservedFileIndex = 'e',
+ kLegacyReservedDOMFileSystem = 'd',
+ kLegacyReservedFileList = 'l',
+ kLegacyReservedFileListIndex = 'L',
+ kLegacyReservedImageData = '#',
+ kLegacyReservedImageBitmap = 'g',
+ kLegacyReservedImageBitmapTransfer = 'G',
+ kLegacyReservedOffscreenCanvas = 'H',
+ kLegacyReservedCryptoKey = 'K',
+ kLegacyReservedRTCCertificate = 'k',
};
namespace {
@@ -184,6 +216,28 @@ enum class WasmEncodingTag : uint8_t {
kRawBytes = 'y',
};
+// Sub-tags only meaningful for error serialization.
+enum class ErrorTag : uint8_t {
+ // The error is an EvalError. No accompanying data.
+ kEvalErrorPrototype = 'E',
+ // The error is a RangeError. No accompanying data.
+ kRangeErrorPrototype = 'R',
+ // The error is a ReferenceError. No accompanying data.
+ kReferenceErrorPrototype = 'F',
+ // The error is a SyntaxError. No accompanying data.
+ kSyntaxErrorPrototype = 'S',
+ // The error is a TypeError. No accompanying data.
+ kTypeErrorPrototype = 'T',
+ // The error is a URIError. No accompanying data.
+ kUriErrorPrototype = 'U',
+ // Followed by message: string.
+ kMessage = 'm',
+ // Followed by stack: string.
+ kStack = 's',
+ // The end of this error information.
+ kEnd = '.',
+};
+
} // namespace
ValueSerializer::ValueSerializer(Isolate* isolate,
@@ -505,8 +559,9 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_DATE_TYPE:
WriteJSDate(JSDate::cast(*receiver));
return ThrowIfOutOfMemory();
- case JS_VALUE_TYPE:
- return WriteJSValue(Handle<JSValue>::cast(receiver));
+ case JS_PRIMITIVE_WRAPPER_TYPE:
+ return WriteJSPrimitiveWrapper(
+ Handle<JSPrimitiveWrapper>::cast(receiver));
case JS_REGEXP_TYPE:
WriteJSRegExp(JSRegExp::cast(*receiver));
return ThrowIfOutOfMemory();
@@ -519,6 +574,8 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
+ case JS_ERROR_TYPE:
+ return WriteJSError(Handle<JSObject>::cast(receiver));
case WASM_MODULE_TYPE: {
auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
if (!FLAG_wasm_disable_structured_cloning || enabled_features.threads) {
@@ -720,7 +777,8 @@ void ValueSerializer::WriteJSDate(JSDate date) {
WriteDouble(date.value().Number());
}
-Maybe<bool> ValueSerializer::WriteJSValue(Handle<JSValue> value) {
+Maybe<bool> ValueSerializer::WriteJSPrimitiveWrapper(
+ Handle<JSPrimitiveWrapper> value) {
Object inner_value = value->value();
if (inner_value.IsTrue(isolate_)) {
WriteTag(SerializationTag::kTrueObject);
@@ -874,6 +932,60 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView view) {
return ThrowIfOutOfMemory();
}
+Maybe<bool> ValueSerializer::WriteJSError(Handle<JSObject> error) {
+ Handle<Object> stack;
+ PropertyDescriptor message_desc;
+ Maybe<bool> message_found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate_, error, isolate_->factory()->message_string(), &message_desc);
+ MAYBE_RETURN(message_found, Nothing<bool>());
+
+ WriteTag(SerializationTag::kError);
+
+ Handle<HeapObject> prototype;
+ if (!JSObject::GetPrototype(isolate_, error).ToHandle(&prototype)) {
+ return Nothing<bool>();
+ }
+
+ if (*prototype == isolate_->eval_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kEvalErrorPrototype));
+ } else if (*prototype == isolate_->range_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kRangeErrorPrototype));
+ } else if (*prototype == isolate_->reference_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kReferenceErrorPrototype));
+ } else if (*prototype == isolate_->syntax_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kSyntaxErrorPrototype));
+ } else if (*prototype == isolate_->type_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kTypeErrorPrototype));
+ } else if (*prototype == isolate_->uri_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kUriErrorPrototype));
+ } else {
+ // The default prototype on the deserialization side is Error.prototype, so
+ // we don't have to do anything here.
+ }
+
+ if (message_found.FromJust() &&
+ PropertyDescriptor::IsDataDescriptor(&message_desc)) {
+ Handle<String> message;
+ if (!Object::ToString(isolate_, message_desc.value()).ToHandle(&message)) {
+ return Nothing<bool>();
+ }
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kMessage));
+ WriteString(message);
+ }
+
+ if (!Object::GetProperty(isolate_, error, isolate_->factory()->stack_string())
+ .ToHandle(&stack)) {
+ return Nothing<bool>();
+ }
+ if (stack->IsString()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kStack));
+ WriteString(Handle<String>::cast(stack));
+ }
+
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kEnd));
+ return ThrowIfOutOfMemory();
+}
+
Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
if (delegate_ != nullptr) {
// TODO(titzer): introduce a Utils::ToLocal for WasmModuleObject.
@@ -1238,7 +1350,7 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
case SerializationTag::kNumberObject:
case SerializationTag::kBigIntObject:
case SerializationTag::kStringObject:
- return ReadJSValue(tag);
+ return ReadJSPrimitiveWrapper(tag);
case SerializationTag::kRegExp:
return ReadJSRegExp();
case SerializationTag::kBeginJSMap:
@@ -1256,6 +1368,8 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
const bool is_shared = true;
return ReadJSArrayBuffer(is_shared);
}
+ case SerializationTag::kError:
+ return ReadJSError();
case SerializationTag::kWasmModule:
return ReadWasmModule();
case SerializationTag::kWasmModuleTransfer:
@@ -1519,24 +1633,25 @@ MaybeHandle<JSDate> ValueDeserializer::ReadJSDate() {
return date;
}
-MaybeHandle<JSValue> ValueDeserializer::ReadJSValue(SerializationTag tag) {
+MaybeHandle<JSPrimitiveWrapper> ValueDeserializer::ReadJSPrimitiveWrapper(
+ SerializationTag tag) {
uint32_t id = next_id_++;
- Handle<JSValue> value;
+ Handle<JSPrimitiveWrapper> value;
switch (tag) {
case SerializationTag::kTrueObject:
- value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
isolate_->boolean_function(), allocation_));
value->set_value(ReadOnlyRoots(isolate_).true_value());
break;
case SerializationTag::kFalseObject:
- value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
isolate_->boolean_function(), allocation_));
value->set_value(ReadOnlyRoots(isolate_).false_value());
break;
case SerializationTag::kNumberObject: {
double number;
- if (!ReadDouble().To(&number)) return MaybeHandle<JSValue>();
- value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ if (!ReadDouble().To(&number)) return MaybeHandle<JSPrimitiveWrapper>();
+ value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
isolate_->number_function(), allocation_));
Handle<Object> number_object =
isolate_->factory()->NewNumber(number, allocation_);
@@ -1545,16 +1660,18 @@ MaybeHandle<JSValue> ValueDeserializer::ReadJSValue(SerializationTag tag) {
}
case SerializationTag::kBigIntObject: {
Handle<BigInt> bigint;
- if (!ReadBigInt().ToHandle(&bigint)) return MaybeHandle<JSValue>();
- value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ if (!ReadBigInt().ToHandle(&bigint))
+ return MaybeHandle<JSPrimitiveWrapper>();
+ value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
isolate_->bigint_function(), allocation_));
value->set_value(*bigint);
break;
}
case SerializationTag::kStringObject: {
Handle<String> string;
- if (!ReadString().ToHandle(&string)) return MaybeHandle<JSValue>();
- value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ if (!ReadString().ToHandle(&string))
+ return MaybeHandle<JSPrimitiveWrapper>();
+ value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
isolate_->string_function(), allocation_));
value->set_value(*string);
break;
@@ -1578,7 +1695,7 @@ MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
// Ensure the deserialized flags are valid.
// TODO(adamk): Can we remove this check now that dotAll is always-on?
- uint32_t flags_mask = static_cast<uint32_t>(-1) << JSRegExp::FlagCount();
+ uint32_t flags_mask = static_cast<uint32_t>(-1) << JSRegExp::kFlagCount;
if ((raw_flags & flags_mask) ||
!JSRegExp::New(isolate_, pattern, static_cast<JSRegExp::Flags>(raw_flags))
.ToHandle(&regexp)) {
@@ -1768,6 +1885,78 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
return typed_array;
}
+MaybeHandle<Object> ValueDeserializer::ReadJSError() {
+ Handle<Object> message = isolate_->factory()->undefined_value();
+ Handle<Object> stack = isolate_->factory()->undefined_value();
+ Handle<Object> no_caller;
+ auto constructor = isolate_->error_function();
+ bool done = false;
+
+ while (!done) {
+ uint8_t tag;
+ if (!ReadVarint<uint8_t>().To(&tag)) {
+ return MaybeHandle<JSObject>();
+ }
+ switch (static_cast<ErrorTag>(tag)) {
+ case ErrorTag::kEvalErrorPrototype:
+ constructor = isolate_->eval_error_function();
+ break;
+ case ErrorTag::kRangeErrorPrototype:
+ constructor = isolate_->range_error_function();
+ break;
+ case ErrorTag::kReferenceErrorPrototype:
+ constructor = isolate_->reference_error_function();
+ break;
+ case ErrorTag::kSyntaxErrorPrototype:
+ constructor = isolate_->syntax_error_function();
+ break;
+ case ErrorTag::kTypeErrorPrototype:
+ constructor = isolate_->type_error_function();
+ break;
+ case ErrorTag::kUriErrorPrototype:
+ constructor = isolate_->uri_error_function();
+ break;
+ case ErrorTag::kMessage: {
+ Handle<String> message_string;
+ if (!ReadString().ToHandle(&message_string)) {
+ return MaybeHandle<JSObject>();
+ }
+ message = message_string;
+ break;
+ }
+ case ErrorTag::kStack: {
+ Handle<String> stack_string;
+ if (!ReadString().ToHandle(&stack_string)) {
+ return MaybeHandle<JSObject>();
+ }
+ stack = stack_string;
+ break;
+ }
+ case ErrorTag::kEnd:
+ done = true;
+ break;
+ default:
+ return MaybeHandle<JSObject>();
+ }
+ }
+
+ Handle<Object> error;
+ if (!ErrorUtils::Construct(isolate_, constructor, constructor, message,
+ SKIP_NONE, no_caller,
+ ErrorUtils::StackTraceCollection::kNone)
+ .ToHandle(&error)) {
+ return MaybeHandle<Object>();
+ }
+
+ if (Object::SetProperty(
+ isolate_, error, isolate_->factory()->stack_trace_symbol(), stack,
+ StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError))
+ .is_null()) {
+ return MaybeHandle<Object>();
+ }
+ return error;
+}
+
MaybeHandle<JSObject> ValueDeserializer::ReadWasmModuleTransfer() {
auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
if ((FLAG_wasm_disable_structured_cloning && !enabled_features.threads) ||
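For reference, the error payload written by WriteJSError and consumed by ReadJSError is a flat sequence of ErrorTag bytes terminated by kEnd. The standalone C++ sketch below mirrors that tag loop; DecodedError and DecodeError are illustrative names, not V8 API, and the string payload encoding here (a single length byte plus raw bytes) is a deliberate simplification of the serializer's real string tags and varint lengths.

// Minimal sketch of the error sub-tag protocol (kError = 'r', then ErrorTag
// entries, terminated by '.').  Not V8 code; the string encoding is simplified.
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

enum class ErrorTag : uint8_t {
  kEvalErrorPrototype = 'E',
  kRangeErrorPrototype = 'R',
  kReferenceErrorPrototype = 'F',
  kSyntaxErrorPrototype = 'S',
  kTypeErrorPrototype = 'T',
  kUriErrorPrototype = 'U',
  kMessage = 'm',
  kStack = 's',
  kEnd = '.',
};

struct DecodedError {
  std::string constructor = "Error";  // Default when no prototype tag is seen.
  std::string message;
  std::string stack;
};

// Mirrors the tag loop in ValueDeserializer::ReadJSError: consume sub-tags
// until kEnd, failing on anything unknown or truncated.
std::optional<DecodedError> DecodeError(const std::vector<uint8_t>& data) {
  DecodedError out;
  size_t pos = 0;
  auto read_string = [&]() -> std::optional<std::string> {
    if (pos >= data.size()) return std::nullopt;
    size_t len = data[pos++];  // Simplified: single length byte.
    if (pos + len > data.size()) return std::nullopt;
    std::string s(data.begin() + pos, data.begin() + pos + len);
    pos += len;
    return s;
  };
  while (pos < data.size()) {
    switch (static_cast<ErrorTag>(data[pos++])) {
      case ErrorTag::kEvalErrorPrototype: out.constructor = "EvalError"; break;
      case ErrorTag::kRangeErrorPrototype: out.constructor = "RangeError"; break;
      case ErrorTag::kReferenceErrorPrototype:
        out.constructor = "ReferenceError";
        break;
      case ErrorTag::kSyntaxErrorPrototype: out.constructor = "SyntaxError"; break;
      case ErrorTag::kTypeErrorPrototype: out.constructor = "TypeError"; break;
      case ErrorTag::kUriErrorPrototype: out.constructor = "URIError"; break;
      case ErrorTag::kMessage: {
        auto s = read_string();
        if (!s) return std::nullopt;
        out.message = *s;
        break;
      }
      case ErrorTag::kStack: {
        auto s = read_string();
        if (!s) return std::nullopt;
        out.stack = *s;
        break;
      }
      case ErrorTag::kEnd:
        return out;
      default:
        return std::nullopt;  // Unknown sub-tag: fail, as ReadJSError does.
    }
  }
  return std::nullopt;  // Missing kEnd.
}

int main() {
  std::vector<uint8_t> payload = {'T', 'm', 4, 'o', 'o', 'p', 's', '.'};
  if (auto err = DecodeError(payload)) {
    std::cout << err->constructor << ": " << err->message << "\n";  // TypeError: oops
  }
}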
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index b83227d9d3..9e381d7e76 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -11,7 +11,7 @@
#include "include/v8.h"
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/handles/maybe-handles.h"
#include "src/utils/identity-map.h"
#include "src/utils/vector.h"
@@ -27,9 +27,9 @@ class JSArrayBuffer;
class JSArrayBufferView;
class JSDate;
class JSMap;
+class JSPrimitiveWrapper;
class JSRegExp;
class JSSet;
-class JSValue;
class MutableHeapNumber;
class Object;
class Oddball;
@@ -120,13 +120,15 @@ class ValueSerializer {
Maybe<bool> WriteJSObjectSlow(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArray(Handle<JSArray> array) V8_WARN_UNUSED_RESULT;
void WriteJSDate(JSDate date);
- Maybe<bool> WriteJSValue(Handle<JSValue> value) V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSPrimitiveWrapper(Handle<JSPrimitiveWrapper> value)
+ V8_WARN_UNUSED_RESULT;
void WriteJSRegExp(JSRegExp regexp);
Maybe<bool> WriteJSMap(Handle<JSMap> map) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSSet(Handle<JSSet> map) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBuffer(Handle<JSArrayBuffer> array_buffer)
V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView array_buffer);
+ Maybe<bool> WriteJSError(Handle<JSObject> error) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteWasmModule(Handle<WasmModuleObject> object)
V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteWasmMemory(Handle<WasmMemoryObject> object)
@@ -264,7 +266,8 @@ class ValueDeserializer {
MaybeHandle<JSArray> ReadSparseJSArray() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArray> ReadDenseJSArray() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSDate> ReadJSDate() V8_WARN_UNUSED_RESULT;
- MaybeHandle<JSValue> ReadJSValue(SerializationTag tag) V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSPrimitiveWrapper> ReadJSPrimitiveWrapper(SerializationTag tag)
+ V8_WARN_UNUSED_RESULT;
MaybeHandle<JSRegExp> ReadJSRegExp() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSMap> ReadJSMap() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSSet> ReadJSSet() V8_WARN_UNUSED_RESULT;
@@ -274,6 +277,7 @@ class ValueDeserializer {
V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
Handle<JSArrayBuffer> buffer) V8_WARN_UNUSED_RESULT;
+ MaybeHandle<Object> ReadJSError() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSObject> ReadWasmModule() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSObject> ReadWasmModuleTransfer() V8_WARN_UNUSED_RESULT;
MaybeHandle<WasmMemoryObject> ReadWasmMemory() V8_WARN_UNUSED_RESULT;
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index 177f214415..40e6e8b427 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
adamk@chromium.org
gsathya@chromium.org
leszeks@chromium.org
diff --git a/deps/v8/src/parsing/expression-scope.h b/deps/v8/src/parsing/expression-scope.h
index 62e8c0a47a..5a6ef376a8 100644
--- a/deps/v8/src/parsing/expression-scope.h
+++ b/deps/v8/src/parsing/expression-scope.h
@@ -6,7 +6,7 @@
#define V8_PARSING_EXPRESSION_SCOPE_H_
#include "src/ast/scopes.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/objects/function-kind.h"
#include "src/parsing/scanner.h"
#include "src/zone/zone.h" // For ScopedPtrList.
@@ -330,7 +330,7 @@ class VariableDeclarationParsingScope : public ExpressionScope<Types> {
// This also handles marking of loop variables in for-in and for-of
// loops, as determined by loop-nesting-depth.
DCHECK_NOT_NULL(var);
- var->set_maybe_assigned();
+ var->SetMaybeAssigned();
}
}
return var;
@@ -396,8 +396,8 @@ class ExpressionParsingScope : public ExpressionScope<Types> {
using ExpressionScopeT = ExpressionScope<Types>;
using ScopeType = typename ExpressionScopeT::ScopeType;
- ExpressionParsingScope(ParserT* parser,
- ScopeType type = ExpressionScopeT::kExpression)
+ explicit ExpressionParsingScope(
+ ParserT* parser, ScopeType type = ExpressionScopeT::kExpression)
: ExpressionScopeT(parser, type),
variable_list_(parser->variable_buffer()),
has_async_arrow_in_scope_chain_(
@@ -437,8 +437,7 @@ class ExpressionParsingScope : public ExpressionScope<Types> {
}
this->mark_verified();
return this->parser()->RewriteInvalidReferenceExpression(
- expression, beg_pos, end_pos, MessageTemplate::kInvalidLhsInFor,
- kSyntaxError);
+ expression, beg_pos, end_pos, MessageTemplate::kInvalidLhsInFor);
}
void RecordExpressionError(const Scanner::Location& loc,
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index ed9d80861b..0ae09d9897 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -97,7 +97,7 @@ ParseInfo::ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared)
set_start_position(shared->StartPosition());
set_end_position(shared->EndPosition());
- function_literal_id_ = shared->FunctionLiteralId(isolate);
+ function_literal_id_ = shared->function_literal_id();
SetFunctionInfo(shared);
Handle<Script> script(Script::cast(shared->script()), isolate);
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 0ecd8ecedb..2dfb0d2461 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -16,7 +16,7 @@
#include "src/base/v8-fallthrough.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/function-kind.h"
@@ -624,6 +624,11 @@ class ParserBase {
}
}
+ RequiresBrandCheckFlag RequiresBrandCheck(ClassLiteralProperty::Kind kind) {
+ return kind == ClassLiteralProperty::Kind::FIELD ? kNoBrandCheck
+ : kRequiresBrandCheck;
+ }
+
const AstRawString* ClassFieldVariableName(AstValueFactory* ast_value_factory,
int index) {
std::string name = ".class-field-" + std::to_string(index);
@@ -780,7 +785,7 @@ class ParserBase {
if (scanner()->current_token() == Token::AWAIT && !is_async_function()) {
ReportMessageAt(scanner()->location(),
- MessageTemplate::kAwaitNotInAsyncFunction, kSyntaxError);
+ MessageTemplate::kAwaitNotInAsyncFunction);
return;
}
@@ -930,21 +935,19 @@ class ParserBase {
V8_NOINLINE void ReportMessage(MessageTemplate message) {
Scanner::Location source_location = scanner()->location();
impl()->ReportMessageAt(source_location, message,
- static_cast<const char*>(nullptr), kSyntaxError);
+ static_cast<const char*>(nullptr));
}
template <typename T>
- V8_NOINLINE void ReportMessage(MessageTemplate message, T arg,
- ParseErrorType error_type = kSyntaxError) {
+ V8_NOINLINE void ReportMessage(MessageTemplate message, T arg) {
Scanner::Location source_location = scanner()->location();
- impl()->ReportMessageAt(source_location, message, arg, error_type);
+ impl()->ReportMessageAt(source_location, message, arg);
}
V8_NOINLINE void ReportMessageAt(Scanner::Location location,
- MessageTemplate message,
- ParseErrorType error_type) {
+ MessageTemplate message) {
impl()->ReportMessageAt(location, message,
- static_cast<const char*>(nullptr), error_type);
+ static_cast<const char*>(nullptr));
}
V8_NOINLINE void ReportUnexpectedToken(Token::Value token);
@@ -1213,9 +1216,9 @@ class ParserBase {
// Checks if the expression is a valid reference expression (e.g., on the
// left-hand side of assignments). Although ruled out by ECMA as early errors,
// we allow calls for web compatibility and rewrite them to a runtime throw.
- ExpressionT RewriteInvalidReferenceExpression(
- ExpressionT expression, int beg_pos, int end_pos, MessageTemplate message,
- ParseErrorType type = kReferenceError);
+ ExpressionT RewriteInvalidReferenceExpression(ExpressionT expression,
+ int beg_pos, int end_pos,
+ MessageTemplate message);
bool IsValidReferenceExpression(ExpressionT expression);
@@ -1305,7 +1308,7 @@ class ParserBase {
return factory()->NewReturnStatement(expr, pos, end_pos);
}
- ModuleDescriptor* module() const {
+ SourceTextModuleDescriptor* module() const {
return scope()->AsModuleScope()->module();
}
Scope* scope() const { return scope_; }
@@ -1567,8 +1570,7 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
if (class_scope == nullptr) {
impl()->ReportMessageAt(Scanner::Location(pos, pos + 1),
MessageTemplate::kInvalidPrivateFieldResolution,
- impl()->GetRawNameFromIdentifier(name),
- kSyntaxError);
+ impl()->GetRawNameFromIdentifier(name));
return impl()->FailureExpression();
}
key = impl()->ExpressionFromPrivateName(class_scope, name, pos);
@@ -1590,15 +1592,14 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
}
IdentifierT js_pattern = impl()->GetNextSymbol();
- Maybe<RegExp::Flags> flags = scanner()->ScanRegExpFlags();
+ Maybe<int> flags = scanner()->ScanRegExpFlags();
if (flags.IsNothing()) {
Next();
ReportMessage(MessageTemplate::kMalformedRegExpFlags);
return impl()->FailureExpression();
}
- int js_flags = flags.FromJust();
Next();
- return factory()->NewRegExpLiteral(js_pattern, js_flags, pos);
+ return factory()->NewRegExpLiteral(js_pattern, flags.FromJust(), pos);
}
template <typename Impl>
@@ -2657,13 +2658,11 @@ ParserBase<Impl>::ParseAssignmentExpressionCoverGrammar() {
impl()->ReportMessageAt(loc,
MessageTemplate::kInvalidDestructuringTarget);
} else {
- // Reference Error if LHS is neither object literal nor an array literal
+ // Syntax Error if LHS is neither object literal nor an array literal
// (Parenthesized literals are
// CoverParenthesizedExpressionAndArrowParameterList).
// #sec-assignment-operators-static-semantics-early-errors
- impl()->ReportMessageAt(loc, MessageTemplate::kInvalidLhsInAssignment,
- static_cast<const char*>(nullptr),
- kReferenceError);
+ impl()->ReportMessageAt(loc, MessageTemplate::kInvalidLhsInAssignment);
}
}
expression_scope()->ValidateAsPattern(expression, lhs_beg_pos,
@@ -2905,7 +2904,7 @@ ParserBase<Impl>::ParseUnaryOrPrefixExpression() {
return impl()->FailureExpression();
}
- if (impl()->IsPropertyWithPrivateFieldKey(expression)) {
+ if (impl()->IsPrivateReference(expression)) {
ReportMessage(MessageTemplate::kDeletePrivateField);
return impl()->FailureExpression();
}
@@ -3291,7 +3290,18 @@ ParserBase<Impl>::ParseImportExpressions() {
return impl()->ImportMetaExpression(pos);
}
- Expect(Token::LPAREN);
+
+ if (V8_UNLIKELY(peek() != Token::LPAREN)) {
+ if (!parsing_module_) {
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kImportOutsideModule);
+ } else {
+ ReportUnexpectedToken(Next());
+ }
+ return impl()->FailureExpression();
+ }
+
+ Consume(Token::LPAREN);
if (peek() == Token::RPAREN) {
impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kImportMissingSpecifier);
@@ -3332,7 +3342,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
// TODO(rossberg): This might not be the correct FunctionState for the
// method here.
expression_scope()->RecordThisUse();
- UseThis()->set_maybe_assigned();
+ UseThis()->SetMaybeAssigned();
return impl()->NewSuperCallReference(pos);
}
}
@@ -4291,7 +4301,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
impl()->ReportMessageAt(Scanner::Location(unresolvable->position(),
unresolvable->position() + 1),
MessageTemplate::kInvalidPrivateFieldResolution,
- unresolvable->raw_name(), kSyntaxError);
+ unresolvable->raw_name());
return impl()->FailureExpression();
}
@@ -4442,15 +4452,14 @@ template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::RewriteInvalidReferenceExpression(ExpressionT expression,
int beg_pos, int end_pos,
- MessageTemplate message,
- ParseErrorType type) {
+ MessageTemplate message) {
DCHECK(!IsValidReferenceExpression(expression));
if (impl()->IsIdentifier(expression)) {
DCHECK(is_strict(language_mode()));
DCHECK(impl()->IsEvalOrArguments(impl()->AsIdentifier(expression)));
ReportMessageAt(Scanner::Location(beg_pos, end_pos),
- MessageTemplate::kStrictEvalArguments, kSyntaxError);
+ MessageTemplate::kStrictEvalArguments);
return impl()->FailureExpression();
}
if (expression->IsCall() && !expression->AsCall()->is_tagged_template()) {
@@ -4467,7 +4476,7 @@ ParserBase<Impl>::RewriteInvalidReferenceExpression(ExpressionT expression,
ExpressionT error = impl()->NewThrowReferenceError(message, beg_pos);
return factory()->NewProperty(expression, error, beg_pos);
}
- ReportMessageAt(Scanner::Location(beg_pos, end_pos), message, type);
+ ReportMessageAt(Scanner::Location(beg_pos, end_pos), message);
return impl()->FailureExpression();
}
@@ -4561,7 +4570,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseV8Intrinsic() {
if (has_spread) {
ReportMessageAt(Scanner::Location(pos, position()),
- MessageTemplate::kIntrinsicWithSpread, kSyntaxError);
+ MessageTemplate::kIntrinsicWithSpread);
return impl()->FailureExpression();
}
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 380920b8ba..2a860da3d0 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -15,8 +15,8 @@
#include "src/base/overflowing-math.h"
#include "src/base/platform/platform.h"
#include "src/codegen/bailout-reason.h"
+#include "src/common/message-template.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
-#include "src/execution/message-template.h"
#include "src/logging/log.h"
#include "src/numbers/conversions-inl.h"
#include "src/objects/scope-info.h"
@@ -501,9 +501,7 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
Scope::DeserializationMode::kIncludingVariables);
scanner_.Initialize();
- if (FLAG_harmony_hashbang) {
- scanner_.SkipHashBang();
- }
+ scanner_.SkipHashBang();
FunctionLiteral* result = DoParseProgram(isolate, info);
MaybeResetCharacterStream(info, result);
MaybeProcessSourceRanges(info, result, stack_limit_);
@@ -1347,7 +1345,7 @@ Statement* Parser::ParseExportDeclaration() {
}
loc.end_pos = scanner()->location().end_pos;
- ModuleDescriptor* descriptor = module();
+ SourceTextModuleDescriptor* descriptor = module();
for (const AstRawString* name : names) {
descriptor->AddExport(name, name, loc, zone());
}
@@ -2783,13 +2781,15 @@ Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name) {
return proxy->var();
}
-Variable* Parser::CreatePrivateNameVariable(ClassScope* scope,
- const AstRawString* name) {
+Variable* Parser::CreatePrivateNameVariable(
+ ClassScope* scope, RequiresBrandCheckFlag requires_brand_check,
+ const AstRawString* name) {
DCHECK_NOT_NULL(name);
int begin = position();
int end = end_position();
bool was_added = false;
- Variable* var = scope->DeclarePrivateName(name, &was_added);
+ Variable* var =
+ scope->DeclarePrivateName(name, requires_brand_check, &was_added);
if (!was_added) {
Scanner::Location loc(begin, end);
ReportMessageAt(loc, MessageTemplate::kVarRedeclaration, var->raw_name());
@@ -2841,7 +2841,8 @@ void Parser::DeclarePrivateClassMember(ClassScope* scope,
}
}
- Variable* private_name_var = CreatePrivateNameVariable(scope, property_name);
+ Variable* private_name_var =
+ CreatePrivateNameVariable(scope, RequiresBrandCheck(kind), property_name);
int pos = property->value()->position();
if (pos == kNoSourcePosition) {
pos = property->key()->position();
@@ -2950,16 +2951,6 @@ Expression* Parser::RewriteClassLiteral(ClassScope* block_scope,
return class_literal;
}
-bool Parser::IsPropertyWithPrivateFieldKey(Expression* expression) {
- if (!expression->IsProperty()) return false;
- Property* property = expression->AsProperty();
-
- if (!property->key()->IsVariableProxy()) return false;
- VariableProxy* key = property->key()->AsVariableProxy();
-
- return key->IsPrivateName();
-}
-
void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
// For each var-binding that shadows a parameter, insert an assignment
// initializing the variable with the parameter.
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index cb1c473af5..b7fb19c26f 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -297,8 +297,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
VariableKind kind, int beg_pos, int end_pos,
ZonePtrList<const AstRawString>* names);
Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name);
- Variable* CreatePrivateNameVariable(ClassScope* scope,
- const AstRawString* name);
+ Variable* CreatePrivateNameVariable(
+ ClassScope* scope, RequiresBrandCheckFlag requires_brand_check,
+ const AstRawString* name);
FunctionLiteral* CreateInitializerFunction(
const char* name, DeclarationScope* scope,
ZonePtrList<ClassLiteral::Property>* fields);
@@ -373,8 +374,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return object_literal;
}
- bool IsPropertyWithPrivateFieldKey(Expression* property);
-
// Insert initializer statements for var-bindings shadowing parameter bindings
// from a non-simple parameter list.
void InsertShadowingVarBindingInitializers(Block* block);
@@ -538,6 +537,13 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return property != nullptr && property->obj()->IsThisExpression();
}
+ // Returns true if the expression is of type "obj.#foo".
+ V8_INLINE static bool IsPrivateReference(Expression* expression) {
+ DCHECK_NOT_NULL(expression);
+ Property* property = expression->AsProperty();
+ return property != nullptr && property->IsPrivateReference();
+ }
+
// This returns true if the expression is an identifier (wrapped
// inside a variable proxy). We exclude the case of 'this', which
// has been converted to a variable proxy.
@@ -690,11 +696,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Reporting errors.
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const char* arg = nullptr,
- ParseErrorType error_type = kSyntaxError) {
- pending_error_handler()->ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ MessageTemplate message, const char* arg = nullptr) {
+ pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, arg);
scanner_.set_parser_error();
}
@@ -703,11 +707,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE void ReportUnidentifiableError() { UNREACHABLE(); }
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError) {
- pending_error_handler()->ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ MessageTemplate message, const AstRawString* arg) {
+ pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, arg);
scanner_.set_parser_error();
}
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.cc b/deps/v8/src/parsing/pending-compilation-error-handler.cc
index b6331b2f9d..d792d5c184 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.cc
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.cc
@@ -30,26 +30,26 @@ MessageLocation PendingCompilationErrorHandler::MessageDetails::GetLocation(
return MessageLocation(script, start_position_, end_position_);
}
-void PendingCompilationErrorHandler::ReportMessageAt(
- int start_position, int end_position, MessageTemplate message,
- const char* arg, ParseErrorType error_type) {
+void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
+ int end_position,
+ MessageTemplate message,
+ const char* arg) {
if (has_pending_error_) return;
has_pending_error_ = true;
error_details_ =
MessageDetails(start_position, end_position, message, nullptr, arg);
- error_type_ = error_type;
}
-void PendingCompilationErrorHandler::ReportMessageAt(
- int start_position, int end_position, MessageTemplate message,
- const AstRawString* arg, ParseErrorType error_type) {
+void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
+ int end_position,
+ MessageTemplate message,
+ const AstRawString* arg) {
if (has_pending_error_) return;
has_pending_error_ = true;
error_details_ =
MessageDetails(start_position, end_position, message, arg, nullptr);
- error_type_ = error_type;
}
void PendingCompilationErrorHandler::ReportWarningAt(int start_position,
@@ -97,17 +97,8 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
isolate->debug()->OnCompileError(script);
Factory* factory = isolate->factory();
- Handle<Object> error;
- switch (error_type_) {
- case kReferenceError:
- error = factory->NewReferenceError(error_details_.message(), argument);
- break;
- case kSyntaxError:
- error = factory->NewSyntaxError(error_details_.message(), argument);
- break;
- default:
- UNREACHABLE();
- }
+ Handle<Object> error =
+ factory->NewSyntaxError(error_details_.message(), argument);
if (!error->IsJSObject()) {
isolate->Throw(*error, &location);
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.h b/deps/v8/src/parsing/pending-compilation-error-handler.h
index c6b9559931..cb2908eaf8 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.h
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.h
@@ -9,7 +9,7 @@
#include "src/base/macros.h"
#include "src/common/globals.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/handles/handles.h"
namespace v8 {
@@ -25,17 +25,13 @@ class Script;
class PendingCompilationErrorHandler {
public:
PendingCompilationErrorHandler()
- : has_pending_error_(false),
- stack_overflow_(false),
- error_type_(kSyntaxError) {}
+ : has_pending_error_(false), stack_overflow_(false) {}
void ReportMessageAt(int start_position, int end_position,
- MessageTemplate message, const char* arg = nullptr,
- ParseErrorType error_type = kSyntaxError);
+ MessageTemplate message, const char* arg = nullptr);
void ReportMessageAt(int start_position, int end_position,
- MessageTemplate message, const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError);
+ MessageTemplate message, const AstRawString* arg);
void ReportWarningAt(int start_position, int end_position,
MessageTemplate message, const char* arg = nullptr);
@@ -110,7 +106,6 @@ class PendingCompilationErrorHandler {
bool unidentifiable_error_ = false;
MessageDetails error_details_;
- ParseErrorType error_type_;
std::forward_list<MessageDetails> warning_messages_;
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index 7f33d301cb..ea5e70a3c1 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -645,7 +645,7 @@ void BaseConsumedPreparseData<Data>::RestoreDataForVariable(Variable* var) {
#endif
uint8_t variable_data = scope_data_->ReadQuarter();
if (VariableMaybeAssignedField::decode(variable_data)) {
- var->set_maybe_assigned();
+ var->SetMaybeAssigned();
}
if (VariableContextAllocatedField::decode(variable_data)) {
var->set_is_used();
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 5d11bddb41..a078d79295 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -74,11 +74,9 @@ PreParser::PreParseResult PreParser::PreParseProgram() {
scope->set_is_being_lazily_parsed(true);
#endif
- if (FLAG_harmony_hashbang) {
- // Note: We should only skip the hashbang in non-Eval scripts
- // (currently, Eval is not handled by the PreParser).
- scanner()->SkipHashBang();
- }
+ // Note: We should only skip the hashbang in non-Eval scripts
+ // (currently, Eval is not handled by the PreParser).
+ scanner()->SkipHashBang();
// ModuleDeclarationInstantiation for Source Text Module Records creates a
// new Module Environment Record whose outer lexical environment record is
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index cca3b3675d..33c312f392 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -144,10 +144,10 @@ class PreParserExpression {
ExpressionTypeField::encode(kThisExpression));
}
- static PreParserExpression ThisPropertyWithPrivateFieldKey() {
- return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(
- kThisPropertyExpressionWithPrivateFieldKey));
+ static PreParserExpression ThisPrivateReference() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kThisPrivateReferenceExpression));
}
static PreParserExpression ThisProperty() {
@@ -162,10 +162,10 @@ class PreParserExpression {
ExpressionTypeField::encode(kPropertyExpression));
}
- static PreParserExpression PropertyWithPrivateFieldKey() {
+ static PreParserExpression PrivateReference() {
return PreParserExpression(
TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kPropertyExpressionWithPrivateFieldKey));
+ ExpressionTypeField::encode(kPrivateReferenceExpression));
}
static PreParserExpression Call() {
@@ -242,25 +242,23 @@ class PreParserExpression {
return TypeField::decode(code_) == kExpression &&
(ExpressionTypeField::decode(code_) == kThisPropertyExpression ||
ExpressionTypeField::decode(code_) ==
- kThisPropertyExpressionWithPrivateFieldKey);
+ kThisPrivateReferenceExpression);
}
bool IsProperty() const {
return TypeField::decode(code_) == kExpression &&
(ExpressionTypeField::decode(code_) == kPropertyExpression ||
ExpressionTypeField::decode(code_) == kThisPropertyExpression ||
+ ExpressionTypeField::decode(code_) == kPrivateReferenceExpression ||
ExpressionTypeField::decode(code_) ==
- kPropertyExpressionWithPrivateFieldKey ||
- ExpressionTypeField::decode(code_) ==
- kThisPropertyExpressionWithPrivateFieldKey);
+ kThisPrivateReferenceExpression);
}
- bool IsPropertyWithPrivateFieldKey() const {
+ bool IsPrivateReference() const {
return TypeField::decode(code_) == kExpression &&
- (ExpressionTypeField::decode(code_) ==
- kPropertyExpressionWithPrivateFieldKey ||
+ (ExpressionTypeField::decode(code_) == kPrivateReferenceExpression ||
ExpressionTypeField::decode(code_) ==
- kThisPropertyExpressionWithPrivateFieldKey);
+ kThisPrivateReferenceExpression);
}
bool IsCall() const {
@@ -332,9 +330,9 @@ class PreParserExpression {
enum ExpressionType {
kThisExpression,
kThisPropertyExpression,
- kThisPropertyExpressionWithPrivateFieldKey,
+ kThisPrivateReferenceExpression,
kPropertyExpression,
- kPropertyExpressionWithPrivateFieldKey,
+ kPrivateReferenceExpression,
kCallExpression,
kCallEvalExpression,
kCallTaggedTemplateExpression,
@@ -573,9 +571,9 @@ class PreParserFactory {
const PreParserExpression& key, int pos) {
if (key.IsIdentifier() && key.AsIdentifier().IsPrivateName()) {
if (obj.IsThis()) {
- return PreParserExpression::ThisPropertyWithPrivateFieldKey();
+ return PreParserExpression::ThisPrivateReference();
}
- return PreParserExpression::PropertyWithPrivateFieldKey();
+ return PreParserExpression::PrivateReference();
}
if (obj.IsThis()) {
@@ -848,7 +846,7 @@ class PreParserFuncNameInferrer {
class PreParserSourceRange {
public:
- PreParserSourceRange() {}
+ PreParserSourceRange() = default;
PreParserSourceRange(int start, int end) {}
static PreParserSourceRange Empty() { return PreParserSourceRange(); }
static PreParserSourceRange OpenEnded(int32_t start) { return Empty(); }
@@ -1045,9 +1043,8 @@ class PreParser : public ParserBase<PreParser> {
TemplateLiteralState* state, int start, const PreParserExpression& tag) {
return PreParserExpression::Default();
}
- V8_INLINE bool IsPropertyWithPrivateFieldKey(
- const PreParserExpression& expression) {
- return expression.IsPropertyWithPrivateFieldKey();
+ V8_INLINE bool IsPrivateReference(const PreParserExpression& expression) {
+ return expression.IsPrivateReference();
}
V8_INLINE void SetLanguageMode(Scope* scope, LanguageMode mode) {
scope->SetLanguageMode(mode);
@@ -1103,9 +1100,10 @@ class PreParser : public ParserBase<PreParser> {
// Don't bother actually binding the proxy.
}
- Variable* DeclarePrivateVariableName(const AstRawString* name,
- ClassScope* scope, bool* was_added) {
- return scope->DeclarePrivateName(name, was_added);
+ Variable* DeclarePrivateVariableName(
+ const AstRawString* name, ClassScope* scope,
+ RequiresBrandCheckFlag requires_brand_check, bool* was_added) {
+ return scope->DeclarePrivateName(name, requires_brand_check, was_added);
}
Variable* DeclareVariableName(const AstRawString* name, VariableMode mode,
@@ -1258,7 +1256,9 @@ class PreParser : public ParserBase<PreParser> {
return;
}
bool was_added;
- DeclarePrivateVariableName(property_name.string_, scope, &was_added);
+
+ DeclarePrivateVariableName(property_name.string_, scope,
+ RequiresBrandCheck(kind), &was_added);
if (!was_added) {
Scanner::Location loc(property.position(), property.position() + 1);
ReportMessageAt(loc, MessageTemplate::kVarRedeclaration,
@@ -1483,11 +1483,9 @@ class PreParser : public ParserBase<PreParser> {
// Reporting errors.
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const char* arg = nullptr,
- ParseErrorType error_type = kSyntaxError) {
- pending_error_handler()->ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ MessageTemplate message, const char* arg = nullptr) {
+ pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, arg);
scanner()->set_parser_error();
}
@@ -1498,17 +1496,14 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void ReportMessageAt(Scanner::Location source_location,
MessageTemplate message,
- const PreParserIdentifier& arg,
- ParseErrorType error_type = kSyntaxError) {
+ const PreParserIdentifier& arg) {
UNREACHABLE();
}
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError) {
- pending_error_handler()->ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ MessageTemplate message, const AstRawString* arg) {
+ pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, arg);
scanner()->set_parser_error();
}
@@ -1644,11 +1639,11 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Jump();
}
- V8_INLINE void AddFormalParameter(PreParserFormalParameters* parameters,
- PreParserExpression& pattern,
- const PreParserExpression& initializer,
- int initializer_end_position,
- bool is_rest) {
+ V8_INLINE void AddFormalParameter(
+ PreParserFormalParameters* parameters,
+ PreParserExpression& pattern, // NOLINT(runtime/references)
+ const PreParserExpression& initializer, int initializer_end_position,
+ bool is_rest) {
DeclarationScope* scope = parameters->scope;
scope->RecordParameter(is_rest);
parameters->UpdateArityAndFunctionLength(!initializer.IsNull(), is_rest);
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 7758b2bb73..01ea0a0d02 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -590,7 +590,8 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
}
}
- while (cursor < end && output_cursor + 1 < buffer_start_ + kBufferSize) {
+ const uint16_t* max_buffer_end = buffer_start_ + kBufferSize;
+ while (cursor < end && output_cursor + 1 < max_buffer_end) {
unibrow::uchar t =
unibrow::Utf8::ValueOfIncremental(&cursor, &state, &incomplete_char);
if (V8_LIKELY(t <= unibrow::Utf16::kMaxNonSurrogateCharCode)) {
@@ -601,6 +602,15 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
*(output_cursor++) = unibrow::Utf16::LeadSurrogate(t);
*(output_cursor++) = unibrow::Utf16::TrailSurrogate(t);
}
+ // Fast path for ascii sequences.
+ size_t remaining = end - cursor;
+ size_t max_buffer = max_buffer_end - output_cursor;
+ int max_length = static_cast<int>(Min(remaining, max_buffer));
+ DCHECK_EQ(state, unibrow::Utf8::State::kAccept);
+ int ascii_length = NonAsciiStart(cursor, max_length);
+ CopyChars(output_cursor, cursor, ascii_length);
+ cursor += ascii_length;
+ output_cursor += ascii_length;
}
current_.pos.bytes = chunk.start.bytes + (cursor - chunk.data);
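The new fast path above relies on the fact that, once the incremental UTF-8 decoder is back in its accepting state, a run of ASCII bytes can be widened straight into the UTF-16 output buffer without going through the decoder. Below is a minimal standalone sketch of that idea; AsciiRunLength and CopyAsciiRun are illustrative stand-ins for NonAsciiStart and CopyChars, not the actual V8 helpers.

// Sketch only: widen a leading ASCII run from UTF-8 input into UTF-16 output.
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-in for NonAsciiStart: length of the leading run of ASCII bytes.
size_t AsciiRunLength(const uint8_t* bytes, size_t max_length) {
  size_t i = 0;
  while (i < max_length && bytes[i] < 0x80) ++i;
  return i;
}

// Stand-in for CopyChars in this case: each ASCII byte becomes one UTF-16 unit.
size_t CopyAsciiRun(const uint8_t* cursor, size_t remaining_input,
                    uint16_t* output, size_t remaining_output) {
  size_t max_length =
      remaining_input < remaining_output ? remaining_input : remaining_output;
  size_t run = AsciiRunLength(cursor, max_length);
  for (size_t i = 0; i < run; ++i) output[i] = cursor[i];
  return run;  // Caller advances both the input and output cursors by this.
}

int main() {
  const uint8_t input[] = {'h', 'i', 0xC3, 0xA9};  // "hi" then a non-ASCII byte.
  uint16_t out[8] = {};
  size_t copied = CopyAsciiRun(input, sizeof input, out, 8);
  std::printf("%zu ASCII units copied\n", copied);  // Prints 2.
}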
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 709f28a02d..2f74548020 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -1004,45 +1004,21 @@ bool Scanner::ScanRegExpPattern() {
return true;
}
-
-Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
+Maybe<int> Scanner::ScanRegExpFlags() {
DCHECK_EQ(Token::REGEXP_LITERAL, next().token);
// Scan regular expression flags.
- int flags = 0;
+ JSRegExp::Flags flags;
while (IsIdentifierPart(c0_)) {
- RegExp::Flags flag = RegExp::kNone;
- switch (c0_) {
- case 'g':
- flag = RegExp::kGlobal;
- break;
- case 'i':
- flag = RegExp::kIgnoreCase;
- break;
- case 'm':
- flag = RegExp::kMultiline;
- break;
- case 's':
- flag = RegExp::kDotAll;
- break;
- case 'u':
- flag = RegExp::kUnicode;
- break;
- case 'y':
- flag = RegExp::kSticky;
- break;
- default:
- return Nothing<RegExp::Flags>();
- }
- if (flags & flag) {
- return Nothing<RegExp::Flags>();
- }
+ JSRegExp::Flags flag = JSRegExp::FlagFromChar(c0_);
+ if (flag == JSRegExp::kInvalid) return Nothing<int>();
+ if (flags & flag) return Nothing<int>();
Advance();
flags |= flag;
}
next().location.end_pos = source_pos();
- return Just(RegExp::Flags(flags));
+ return Just<int>(flags);
}
const AstRawString* Scanner::CurrentSymbol(
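The rewritten ScanRegExpFlags above delegates per-character lookup to JSRegExp::FlagFromChar and rejects both unknown and duplicate flags. A minimal standalone sketch of that loop follows; FlagFromChar and ScanFlags here are illustrative, and the bit values are placeholders rather than V8's actual JSRegExp::Flags layout.

// Sketch of the duplicate-rejecting regexp flag scan.  Not V8 code.
#include <iostream>
#include <optional>
#include <string_view>

constexpr int kInvalidFlag = 0;

int FlagFromChar(char c) {
  switch (c) {
    case 'g': return 1 << 0;  // global
    case 'i': return 1 << 1;  // ignoreCase
    case 'm': return 1 << 2;  // multiline
    case 's': return 1 << 3;  // dotAll
    case 'u': return 1 << 4;  // unicode
    case 'y': return 1 << 5;  // sticky
    default:  return kInvalidFlag;
  }
}

// Mirrors Scanner::ScanRegExpFlags: fail on an unknown flag character or on a
// flag that appears twice, otherwise return the combined bit set.
std::optional<int> ScanFlags(std::string_view flags_text) {
  int flags = 0;
  for (char c : flags_text) {
    int flag = FlagFromChar(c);
    if (flag == kInvalidFlag) return std::nullopt;
    if (flags & flag) return std::nullopt;  // Duplicate flag, e.g. "gg".
    flags |= flag;
  }
  return flags;
}

int main() {
  std::cout << ScanFlags("gi").value_or(-1) << "\n";  // Combined bit set.
  std::cout << ScanFlags("gg").has_value() << "\n";   // 0: duplicate rejected.
}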
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 449aca46ff..e2865bca1c 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -11,7 +11,7 @@
#include "src/base/logging.h"
#include "src/common/globals.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/parsing/literal-buffer.h"
#include "src/parsing/token.h"
#include "src/strings/char-predicates.h"
@@ -392,7 +392,7 @@ class V8_EXPORT_PRIVATE Scanner {
// Returns true if a pattern is scanned.
bool ScanRegExpPattern();
// Scans the input as regular expression flags. Returns the flags on success.
- Maybe<RegExp::Flags> ScanRegExpFlags();
+ Maybe<int> ScanRegExpFlags();
// Scans the input as a template literal
Token::Value ScanTemplateContinuation() {
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index a912c2e1b2..472dbdbb10 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -173,7 +173,8 @@ void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
HeapObject object;
- CombinedHeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
+ CombinedHeapObjectIterator iterator(heap(),
+ HeapObjectIterator::kFilterUnreachable);
// Make sure that object with the given id is still reachable.
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
@@ -202,10 +203,21 @@ Isolate* HeapProfiler::isolate() const { return heap()->isolate(); }
void HeapProfiler::QueryObjects(Handle<Context> context,
debug::QueryObjectPredicate* predicate,
PersistentValueVector<v8::Object>* objects) {
+ {
+ CombinedHeapObjectIterator function_heap_iterator(
+ heap(), HeapObjectIterator::kFilterUnreachable);
+ for (HeapObject heap_obj = function_heap_iterator.Next();
+ !heap_obj.is_null(); heap_obj = function_heap_iterator.Next()) {
+ if (heap_obj.IsFeedbackVector()) {
+ FeedbackVector::cast(heap_obj).ClearSlots(isolate());
+ }
+ }
+ }
// We should return accurate information about live objects, so we need to
// collect all garbage first.
heap()->CollectAllAvailableGarbage(GarbageCollectionReason::kHeapProfiler);
- CombinedHeapIterator heap_iterator(heap());
+ CombinedHeapObjectIterator heap_iterator(
+ heap(), HeapObjectIterator::kFilterUnreachable);
for (HeapObject heap_obj = heap_iterator.Next(); !heap_obj.is_null();
heap_obj = heap_iterator.Next()) {
if (!heap_obj.IsJSObject() || heap_obj.IsExternal(isolate())) continue;
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index bc171360b5..df941eda96 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -395,7 +395,7 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
}
heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kHeapProfiler);
- CombinedHeapIterator iterator(heap_);
+ CombinedHeapObjectIterator iterator(heap_);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
FindOrAddEntry(obj.address(), obj.Size());
@@ -643,7 +643,7 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject object) {
}
int V8HeapExplorer::EstimateObjectsCount() {
- CombinedHeapIterator it(heap_, HeapIterator::kFilterUnreachable);
+ CombinedHeapObjectIterator it(heap_, HeapObjectIterator::kFilterUnreachable);
int objects_count = 0;
while (!it.Next().is_null()) ++objects_count;
return objects_count;
@@ -1446,7 +1446,8 @@ bool V8HeapExplorer::IterateAndExtractReferences(
bool interrupted = false;
- CombinedHeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
+ CombinedHeapObjectIterator iterator(heap_,
+ HeapObjectIterator::kFilterUnreachable);
// Heap iteration with filtering must be finished in any case.
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next(), progress_->ProgressStep()) {
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 756500151f..d3d3330e27 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -25,10 +25,8 @@
namespace v8 {
namespace internal {
-class AllocationTracker;
class AllocationTraceNode;
class HeapEntry;
-class HeapIterator;
class HeapProfiler;
class HeapSnapshot;
class HeapSnapshotGenerator;
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 1f2b4bc72a..b3ea07db34 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -270,9 +270,9 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
// bytecode_array might be garbage, so don't actually dereference it. We
// avoid the frame->GetXXX functions since they call BytecodeArray::cast,
// which has a heap access in its DCHECK.
- i::Address bytecode_array = i::Memory<i::Address>(
+ i::Address bytecode_array = base::Memory<i::Address>(
frame->fp() + i::InterpreterFrameConstants::kBytecodeArrayFromFp);
- i::Address bytecode_offset = i::Memory<i::Address>(
+ i::Address bytecode_offset = base::Memory<i::Address>(
frame->fp() + i::InterpreterFrameConstants::kBytecodeOffsetFromFp);
// If the bytecode array is a heap object and the bytecode offset is a
diff --git a/deps/v8/src/regexp/OWNERS b/deps/v8/src/regexp/OWNERS
index 7f916e12ea..250c8c6b88 100644
--- a/deps/v8/src/regexp/OWNERS
+++ b/deps/v8/src/regexp/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
jgruber@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/regexp/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
deleted file mode 100644
index b542add17b..0000000000
--- a/deps/v8/src/regexp/jsregexp-inl.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-#ifndef V8_REGEXP_JSREGEXP_INL_H_
-#define V8_REGEXP_JSREGEXP_INL_H_
-
-#include "src/objects/js-regexp-inl.h"
-#include "src/objects/objects.h"
-#include "src/regexp/jsregexp.h"
-#include "src/utils/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-
-RegExpImpl::GlobalCache::~GlobalCache() {
- // Deallocate the register array if we allocated it in the constructor
- // (as opposed to using the existing jsregexp_static_offsets_vector).
- if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- DeleteArray(register_array_);
- }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::FetchNext() {
- current_match_index_++;
- if (current_match_index_ >= num_matches_) {
- // Current batch of results exhausted.
- // Fail if last batch was not even fully filled.
- if (num_matches_ < max_matches_) {
- num_matches_ = 0; // Signal failed match.
- return nullptr;
- }
-
- int32_t* last_match =
- &register_array_[(current_match_index_ - 1) * registers_per_match_];
- int last_end_index = last_match[1];
-
- if (regexp_->TypeTag() == JSRegExp::ATOM) {
- num_matches_ =
- RegExpImpl::AtomExecRaw(isolate_, regexp_, subject_, last_end_index,
- register_array_, register_array_size_);
- } else {
- int last_start_index = last_match[0];
- if (last_start_index == last_end_index) {
- // Zero-length match. Advance by one code point.
- last_end_index = AdvanceZeroLength(last_end_index);
- }
- if (last_end_index > subject_->length()) {
- num_matches_ = 0; // Signal failed match.
- return nullptr;
- }
- num_matches_ = RegExpImpl::IrregexpExecRaw(
- isolate_, regexp_, subject_, last_end_index, register_array_,
- register_array_size_);
- }
-
- if (num_matches_ <= 0) return nullptr;
- current_match_index_ = 0;
- return register_array_;
- } else {
- return &register_array_[current_match_index_ * registers_per_match_];
- }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
- int index = current_match_index_ * registers_per_match_;
- if (num_matches_ == 0) {
- // After a failed match we shift back by one result.
- index -= registers_per_match_;
- }
- return &register_array_[index];
-}
-
-RegExpEngine::CompilationResult::CompilationResult(Isolate* isolate,
- const char* error_message)
- : error_message(error_message),
- code(ReadOnlyRoots(isolate).the_hole_value()) {}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_REGEXP_JSREGEXP_INL_H_
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
deleted file mode 100644
index a6f3a5ebcb..0000000000
--- a/deps/v8/src/regexp/jsregexp.cc
+++ /dev/null
@@ -1,7055 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/regexp/jsregexp.h"
-
-#include <memory>
-#include <vector>
-
-#include "src/base/platform/platform.h"
-#include "src/codegen/compilation-cache.h"
-#include "src/diagnostics/code-tracer.h"
-#include "src/execution/execution.h"
-#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
-#include "src/heap/factory.h"
-#include "src/heap/heap-inl.h"
-#include "src/objects/elements.h"
-#include "src/regexp/interpreter-irregexp.h"
-#include "src/regexp/jsregexp-inl.h"
-#include "src/regexp/regexp-macro-assembler-irregexp.h"
-#include "src/regexp/regexp-macro-assembler-tracer.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/regexp/regexp-parser.h"
-#include "src/regexp/regexp-stack.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/regexp/special-case.h"
-#endif // V8_INTL_SUPPORT
-#include "src/runtime/runtime.h"
-#include "src/strings/string-search.h"
-#include "src/strings/unicode-decoder.h"
-#include "src/strings/unicode-inl.h"
-#include "src/utils/ostreams.h"
-#include "src/utils/splay-tree-inl.h"
-#include "src/zone/zone-list-inl.h"
-
-#ifdef V8_INTL_SUPPORT
-#include "unicode/locid.h"
-#include "unicode/uniset.h"
-#include "unicode/utypes.h"
-#endif // V8_INTL_SUPPORT
-
-#if V8_TARGET_ARCH_IA32
-#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/regexp/x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_ARM64
-#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/regexp/arm/regexp-macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_PPC
-#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
-#elif V8_TARGET_ARCH_S390
-#include "src/regexp/s390/regexp-macro-assembler-s390.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/regexp/mips/regexp-macro-assembler-mips.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-V8_WARN_UNUSED_RESULT
-static inline MaybeHandle<Object> ThrowRegExpException(
- Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
- Handle<String> error_text) {
- THROW_NEW_ERROR(isolate, NewSyntaxError(MessageTemplate::kMalformedRegExp,
- pattern, error_text),
- Object);
-}
-
-inline void ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> error_text) {
- USE(ThrowRegExpException(isolate, re, Handle<String>(re->Pattern(), isolate),
- error_text));
-}
-
-
-ContainedInLattice AddRange(ContainedInLattice containment,
- const int* ranges,
- int ranges_length,
- Interval new_range) {
- DCHECK_EQ(1, ranges_length & 1);
- DCHECK_EQ(String::kMaxCodePoint + 1, ranges[ranges_length - 1]);
- if (containment == kLatticeUnknown) return containment;
- bool inside = false;
- int last = 0;
- for (int i = 0; i < ranges_length; inside = !inside, last = ranges[i], i++) {
- // Consider the range from last to ranges[i].
- // We haven't got to the new range yet.
- if (ranges[i] <= new_range.from()) continue;
- // New range is wholly inside last-ranges[i]. Note that new_range.to() is
- // inclusive, but the values in ranges are not.
- if (last <= new_range.from() && new_range.to() < ranges[i]) {
- return Combine(containment, inside ? kLatticeIn : kLatticeOut);
- }
- return kLatticeUnknown;
- }
- return containment;
-}
-
-// More makes code generation slower, less lowers the V8 benchmark score.
-const int kMaxLookaheadForBoyerMoore = 8;
-// In a 3-character pattern you can step forward at most 3 characters
-// at a time, which is not always enough to pay for the extra logic.
-const int kPatternTooShortForBoyerMoore = 2;
-
-// Identifies the sort of regexps where the regexp engine is faster
-// than the code used for atom matches.
-static bool HasFewDifferentCharacters(Handle<String> pattern) {
- int length = Min(kMaxLookaheadForBoyerMoore, pattern->length());
- if (length <= kPatternTooShortForBoyerMoore) return false;
- const int kMod = 128;
- bool character_found[kMod];
- int different = 0;
- memset(&character_found[0], 0, sizeof(character_found));
- for (int i = 0; i < length; i++) {
- int ch = (pattern->Get(i) & (kMod - 1));
- if (!character_found[ch]) {
- character_found[ch] = true;
- different++;
- // We declare a regexp low-alphabet if it has at least 3 times as many
- // characters as it has different characters.
- if (different * 3 > length) return false;
- }
- }
- return true;
-}
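
A minimal standalone sketch of the heuristic above, assuming a plain
std::string stands in for V8's flattened String (illustrative only):

#include <algorithm>
#include <string>

// Looks at up to 8 leading characters (mod 128) and reports true when fewer
// than a third of them are distinct, mirroring HasFewDifferentCharacters.
bool HasFewDifferentCharactersSketch(const std::string& pattern) {
  const int kMaxLookahead = 8;  // kMaxLookaheadForBoyerMoore
  const int kTooShort = 2;      // kPatternTooShortForBoyerMoore
  int length = std::min<int>(kMaxLookahead, static_cast<int>(pattern.size()));
  if (length <= kTooShort) return false;
  bool seen[128] = {false};
  int different = 0;
  for (int i = 0; i < length; i++) {
    int ch = static_cast<unsigned char>(pattern[i]) & 127;
    if (!seen[ch]) {
      seen[ch] = true;
      different++;
      if (different * 3 > length) return false;
    }
  }
  return true;
}

Under this sketch "aabbaabb" counts as low-alphabet while "abcdabcd" does not.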
-
-// Generic RegExp methods. Dispatches to implementation specific methods.
-
-MaybeHandle<Object> RegExpImpl::Compile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags) {
- DCHECK(pattern->IsFlat());
-
- Zone zone(isolate->allocator(), ZONE_NAME);
- CompilationCache* compilation_cache = isolate->compilation_cache();
- MaybeHandle<FixedArray> maybe_cached =
- compilation_cache->LookupRegExp(pattern, flags);
- Handle<FixedArray> cached;
- if (maybe_cached.ToHandle(&cached)) {
- re->set_data(*cached);
- return re;
- }
-
- PostponeInterruptsScope postpone(isolate);
- RegExpCompileData parse_result;
- FlatStringReader reader(isolate, pattern);
- DCHECK(!isolate->has_pending_exception());
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
- &parse_result)) {
- // Throw an exception if we fail to parse the pattern.
- return ThrowRegExpException(isolate, re, pattern, parse_result.error);
- }
-
- bool has_been_compiled = false;
-
- if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
- !HasFewDifferentCharacters(pattern)) {
- // Parse-tree is a single atom that is equal to the pattern.
- AtomCompile(isolate, re, pattern, flags, pattern);
- has_been_compiled = true;
- } else if (parse_result.tree->IsAtom() && !IsSticky(flags) &&
- parse_result.capture_count == 0) {
- RegExpAtom* atom = parse_result.tree->AsAtom();
- Vector<const uc16> atom_pattern = atom->data();
- Handle<String> atom_string;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, atom_string,
- isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
- if (!IgnoreCase(atom->flags()) && !HasFewDifferentCharacters(atom_string)) {
- AtomCompile(isolate, re, pattern, flags, atom_string);
- has_been_compiled = true;
- }
- }
- if (!has_been_compiled) {
- IrregexpInitialize(isolate, re, pattern, flags, parse_result.capture_count);
- }
- DCHECK(re->data().IsFixedArray());
- // Compilation succeeded so the data is set on the regexp
- // and we can store it in the cache.
- Handle<FixedArray> data(FixedArray::cast(re->data()), isolate);
- compilation_cache->PutRegExp(pattern, flags, data);
-
- return re;
-}
-
-MaybeHandle<Object> RegExpImpl::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info) {
- switch (regexp->TypeTag()) {
- case JSRegExp::ATOM:
- return AtomExec(isolate, regexp, subject, index, last_match_info);
- case JSRegExp::IRREGEXP: {
- return IrregexpExec(isolate, regexp, subject, index, last_match_info);
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// RegExp Atom implementation: Simple string search using indexOf.
-
-void RegExpImpl::AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
- Handle<String> match_pattern) {
- isolate->factory()->SetRegExpAtomData(re, JSRegExp::ATOM, pattern, flags,
- match_pattern);
-}
-
-static void SetAtomLastCapture(Isolate* isolate,
- Handle<RegExpMatchInfo> last_match_info,
- String subject, int from, int to) {
- SealHandleScope shs(isolate);
- last_match_info->SetNumberOfCaptureRegisters(2);
- last_match_info->SetLastSubject(subject);
- last_match_info->SetLastInput(subject);
- last_match_info->SetCapture(0, from);
- last_match_info->SetCapture(1, to);
-}
-
-int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index, int32_t* output,
- int output_size) {
- DCHECK_LE(0, index);
- DCHECK_LE(index, subject->length());
-
- subject = String::Flatten(isolate, subject);
- DisallowHeapAllocation no_gc; // ensure vectors stay valid
-
- String needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
- int needle_len = needle.length();
- DCHECK(needle.IsFlat());
- DCHECK_LT(0, needle_len);
-
- if (index + needle_len > subject->length()) {
- return RegExpImpl::RE_FAILURE;
- }
-
- for (int i = 0; i < output_size; i += 2) {
- String::FlatContent needle_content = needle.GetFlatContent(no_gc);
- String::FlatContent subject_content = subject->GetFlatContent(no_gc);
- DCHECK(needle_content.IsFlat());
- DCHECK(subject_content.IsFlat());
- // dispatch on type of strings
- index =
- (needle_content.IsOneByte()
- ? (subject_content.IsOneByte()
- ? SearchString(isolate, subject_content.ToOneByteVector(),
- needle_content.ToOneByteVector(), index)
- : SearchString(isolate, subject_content.ToUC16Vector(),
- needle_content.ToOneByteVector(), index))
- : (subject_content.IsOneByte()
- ? SearchString(isolate, subject_content.ToOneByteVector(),
- needle_content.ToUC16Vector(), index)
- : SearchString(isolate, subject_content.ToUC16Vector(),
- needle_content.ToUC16Vector(), index)));
- if (index == -1) {
- return i / 2; // Return number of matches.
- } else {
- output[i] = index;
- output[i+1] = index + needle_len;
- index += needle_len;
- }
- }
- return output_size / 2;
-}
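
The loop above is essentially a repeated indexOf. A minimal sketch of the same
idea, assuming std::string::find stands in for SearchString and a vector of
(start, end) pairs stands in for the raw output registers (illustrative only):

#include <string>
#include <utility>
#include <vector>

// Collects up to max_matches non-overlapping (start, end) pairs, resuming each
// search where the previous match ended.
std::vector<std::pair<int, int>> AtomExecRawSketch(const std::string& subject,
                                                   const std::string& needle,
                                                   int index, int max_matches) {
  std::vector<std::pair<int, int>> matches;
  while (static_cast<int>(matches.size()) < max_matches) {
    size_t found = subject.find(needle, index);
    if (found == std::string::npos) break;
    int start = static_cast<int>(found);
    int end = start + static_cast<int>(needle.size());
    matches.emplace_back(start, end);
    index = end;
  }
  return matches;
}

For example, AtomExecRawSketch("abcabc", "abc", 0, 4) yields {(0, 3), (3, 6)}.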
-
-Handle<Object> RegExpImpl::AtomExec(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info) {
- static const int kNumRegisters = 2;
- STATIC_ASSERT(kNumRegisters <= Isolate::kJSRegexpStaticOffsetsVectorSize);
- int32_t* output_registers = isolate->jsregexp_static_offsets_vector();
-
- int res =
- AtomExecRaw(isolate, re, subject, index, output_registers, kNumRegisters);
-
- if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
-
- DCHECK_EQ(res, RegExpImpl::RE_SUCCESS);
- SealHandleScope shs(isolate);
- SetAtomLastCapture(isolate, last_match_info, *subject, output_registers[0],
- output_registers[1]);
- return last_match_info;
-}
-
-
-// Irregexp implementation.
-
-// Ensures that the regexp object contains a compiled version of the
-// source for either one-byte or two-byte subject strings.
-// If the compiled version doesn't already exist, it is compiled
-// from the source pattern.
-// If compilation fails, an exception is thrown and this function
-// returns false.
-bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> sample_subject,
- bool is_one_byte) {
- Object compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
- if (compiled_code != Smi::FromInt(JSRegExp::kUninitializedValue)) {
- DCHECK(FLAG_regexp_interpret_all ? compiled_code.IsByteArray()
- : compiled_code.IsCode());
- return true;
- }
- return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
-}
-
-bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> sample_subject,
- bool is_one_byte) {
- // Compile the RegExp.
- Zone zone(isolate->allocator(), ZONE_NAME);
- PostponeInterruptsScope postpone(isolate);
-#ifdef DEBUG
- Object entry = re->DataAt(JSRegExp::code_index(is_one_byte));
- // When arriving here entry can only be a smi representing an uncompiled
- // regexp.
- DCHECK(entry.IsSmi());
- int entry_value = Smi::ToInt(entry);
- DCHECK_EQ(JSRegExp::kUninitializedValue, entry_value);
-#endif
-
- JSRegExp::Flags flags = re->GetFlags();
-
- Handle<String> pattern(re->Pattern(), isolate);
- pattern = String::Flatten(isolate, pattern);
- RegExpCompileData compile_data;
- FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
- &compile_data)) {
- // Throw an exception if we fail to parse the pattern.
- // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
- USE(ThrowRegExpException(isolate, re, pattern, compile_data.error));
- return false;
- }
- RegExpEngine::CompilationResult result =
- RegExpEngine::Compile(isolate, &zone, &compile_data, flags, pattern,
- sample_subject, is_one_byte);
- if (result.error_message != nullptr) {
- // Unable to compile regexp.
- if (FLAG_correctness_fuzzer_suppressions &&
- strncmp(result.error_message, "Stack overflow", 15) == 0) {
- FATAL("Aborting on stack overflow");
- }
- Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
- CStrVector(result.error_message)).ToHandleChecked();
- ThrowRegExpException(isolate, re, error_message);
- return false;
- }
-
- Handle<FixedArray> data =
- Handle<FixedArray>(FixedArray::cast(re->data()), isolate);
- data->set(JSRegExp::code_index(is_one_byte), result.code);
- SetIrregexpCaptureNameMap(*data, compile_data.capture_name_map);
- int register_max = IrregexpMaxRegisterCount(*data);
- if (result.num_registers > register_max) {
- SetIrregexpMaxRegisterCount(*data, result.num_registers);
- }
-
- return true;
-}
-
-int RegExpImpl::IrregexpMaxRegisterCount(FixedArray re) {
- return Smi::cast(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex)).value();
-}
-
-void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray re, int value) {
- re.set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
-}
-
-void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray re,
- Handle<FixedArray> value) {
- if (value.is_null()) {
- re.set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::kZero);
- } else {
- re.set(JSRegExp::kIrregexpCaptureNameMapIndex, *value);
- }
-}
-
-int RegExpImpl::IrregexpNumberOfCaptures(FixedArray re) {
- return Smi::ToInt(re.get(JSRegExp::kIrregexpCaptureCountIndex));
-}
-
-int RegExpImpl::IrregexpNumberOfRegisters(FixedArray re) {
- return Smi::ToInt(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex));
-}
-
-ByteArray RegExpImpl::IrregexpByteCode(FixedArray re, bool is_one_byte) {
- return ByteArray::cast(re.get(JSRegExp::code_index(is_one_byte)));
-}
-
-Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
- return Code::cast(re.get(JSRegExp::code_index(is_one_byte)));
-}
-
-void RegExpImpl::IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags, int capture_count) {
- // Initialize compiled code entries to null.
- isolate->factory()->SetRegExpIrregexpData(re, JSRegExp::IRREGEXP, pattern,
- flags, capture_count);
-}
-
-int RegExpImpl::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject) {
- DCHECK(subject->IsFlat());
-
- // Check representation of the underlying storage.
- bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
- if (!EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte)) return -1;
-
- if (FLAG_regexp_interpret_all) {
- // Byte-code regexp needs space allocated for all its registers.
- // The result captures are copied to the start of the registers array
- // if the match succeeds. This way those registers are not clobbered
-    // when we set the last match info from the last successful match.
- return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data())) +
- (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
- } else {
- // Native regexp only needs room to output captures. Registers are handled
- // internally.
- return (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
- }
-}
-
-int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index,
- int32_t* output, int output_size) {
- Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
-
- DCHECK_LE(0, index);
- DCHECK_LE(index, subject->length());
- DCHECK(subject->IsFlat());
-
- bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
-
- if (!FLAG_regexp_interpret_all) {
- DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
- do {
- EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
- Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate);
- // The stack is used to allocate registers for the compiled regexp code.
- // This means that in case of failure, the output registers array is left
- // untouched and contains the capture results from the previous successful
- // match. We can use that to set the last match info lazily.
- int res = NativeRegExpMacroAssembler::Match(code, subject, output,
- output_size, index, isolate);
- if (res != NativeRegExpMacroAssembler::RETRY) {
- DCHECK(res != NativeRegExpMacroAssembler::EXCEPTION ||
- isolate->has_pending_exception());
- STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) ==
- RE_SUCCESS);
- STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::FAILURE) ==
- RE_FAILURE);
- STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION) ==
- RE_EXCEPTION);
- return res;
- }
- // If result is RETRY, the string has changed representation, and we
- // must restart from scratch.
-      // In this case we must make sure we are prepared to handle a
-      // potentially different subject (the string can switch between
- // being internal and external, and even between being Latin1 and UC16,
- // but the characters are always the same).
- IrregexpPrepare(isolate, regexp, subject);
- is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
- } while (true);
- UNREACHABLE();
- } else {
- DCHECK(FLAG_regexp_interpret_all);
- DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp));
- // We must have done EnsureCompiledIrregexp, so we can get the number of
- // registers.
- int number_of_capture_registers =
- (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
- int32_t* raw_output = &output[number_of_capture_registers];
-
- do {
- // We do not touch the actual capture result registers until we know there
- // has been a match so that we can use those capture results to set the
- // last match info.
- for (int i = number_of_capture_registers - 1; i >= 0; i--) {
- raw_output[i] = -1;
- }
- Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_one_byte),
- isolate);
-
- IrregexpInterpreter::Result result = IrregexpInterpreter::Match(
- isolate, byte_codes, subject, raw_output, index);
- DCHECK_IMPLIES(result == IrregexpInterpreter::EXCEPTION,
- isolate->has_pending_exception());
-
- switch (result) {
- case IrregexpInterpreter::SUCCESS:
- // Copy capture results to the start of the registers array.
- MemCopy(output, raw_output,
- number_of_capture_registers * sizeof(int32_t));
- return result;
- case IrregexpInterpreter::EXCEPTION:
- case IrregexpInterpreter::FAILURE:
- return result;
- case IrregexpInterpreter::RETRY:
- // The string has changed representation, and we must restart the
- // match.
- is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
- EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
- break;
- }
- } while (true);
- UNREACHABLE();
- }
-}
-
-MaybeHandle<Object> RegExpImpl::IrregexpExec(
- Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int previous_index, Handle<RegExpMatchInfo> last_match_info) {
- DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
-
- subject = String::Flatten(isolate, subject);
-
- // Prepare space for the return values.
-#ifdef DEBUG
- if (FLAG_regexp_interpret_all && FLAG_trace_regexp_bytecodes) {
- String pattern = regexp->Pattern();
- PrintF("\n\nRegexp match: /%s/\n\n", pattern.ToCString().get());
- PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
- }
-#endif
- int required_registers =
- RegExpImpl::IrregexpPrepare(isolate, regexp, subject);
- if (required_registers < 0) {
- // Compiling failed with an exception.
- DCHECK(isolate->has_pending_exception());
- return MaybeHandle<Object>();
- }
-
- int32_t* output_registers = nullptr;
- if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- output_registers = NewArray<int32_t>(required_registers);
- }
- std::unique_ptr<int32_t[]> auto_release(output_registers);
- if (output_registers == nullptr) {
- output_registers = isolate->jsregexp_static_offsets_vector();
- }
-
- int res =
- RegExpImpl::IrregexpExecRaw(isolate, regexp, subject, previous_index,
- output_registers, required_registers);
- if (res == RE_SUCCESS) {
- int capture_count =
- IrregexpNumberOfCaptures(FixedArray::cast(regexp->data()));
- return SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
- output_registers);
- }
- if (res == RE_EXCEPTION) {
- DCHECK(isolate->has_pending_exception());
- return MaybeHandle<Object>();
- }
- DCHECK(res == RE_FAILURE);
- return isolate->factory()->null_value();
-}
-
-Handle<RegExpMatchInfo> RegExpImpl::SetLastMatchInfo(
- Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
- Handle<String> subject, int capture_count, int32_t* match) {
- // This is the only place where match infos can grow. If, after executing the
- // regexp, RegExpExecStub finds that the match info is too small, it restarts
- // execution in RegExpImpl::Exec, which finally grows the match info right
- // here.
-
- int capture_register_count = (capture_count + 1) * 2;
- Handle<RegExpMatchInfo> result = RegExpMatchInfo::ReserveCaptures(
- isolate, last_match_info, capture_register_count);
- result->SetNumberOfCaptureRegisters(capture_register_count);
-
- if (*result != *last_match_info) {
- if (*last_match_info == *isolate->regexp_last_match_info()) {
- // This inner condition is only needed for special situations like the
- // regexp fuzzer, where we pass our own custom RegExpMatchInfo to
-      // RegExpImpl::Exec; there we actually want to bypass the Isolate's match
- // info and execute the regexp without side effects.
- isolate->native_context()->set_regexp_last_match_info(*result);
- }
- }
-
- DisallowHeapAllocation no_allocation;
- if (match != nullptr) {
- for (int i = 0; i < capture_register_count; i += 2) {
- result->SetCapture(i, match[i]);
- result->SetCapture(i + 1, match[i + 1]);
- }
- }
- result->SetLastSubject(*subject);
- result->SetLastInput(*subject);
- return result;
-}
-
-RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
- Handle<String> subject, Isolate* isolate)
- : register_array_(nullptr),
- register_array_size_(0),
- regexp_(regexp),
- subject_(subject),
- isolate_(isolate) {
- bool interpreted = FLAG_regexp_interpret_all;
-
- if (regexp_->TypeTag() == JSRegExp::ATOM) {
- static const int kAtomRegistersPerMatch = 2;
- registers_per_match_ = kAtomRegistersPerMatch;
- // There is no distinction between interpreted and native for atom regexps.
- interpreted = false;
- } else {
- registers_per_match_ =
- RegExpImpl::IrregexpPrepare(isolate_, regexp_, subject_);
- if (registers_per_match_ < 0) {
- num_matches_ = -1; // Signal exception.
- return;
- }
- }
-
- DCHECK(IsGlobal(regexp->GetFlags()));
- if (!interpreted) {
- register_array_size_ =
- Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize);
- max_matches_ = register_array_size_ / registers_per_match_;
- } else {
-    // The global loop is not implemented in the interpreted regexp. We choose
- // the size of the offsets vector so that it can only store one match.
- register_array_size_ = registers_per_match_;
- max_matches_ = 1;
- }
-
- if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- register_array_ = NewArray<int32_t>(register_array_size_);
- } else {
- register_array_ = isolate->jsregexp_static_offsets_vector();
- }
-
- // Set state so that fetching the results the first time triggers a call
- // to the compiled regexp.
- current_match_index_ = max_matches_ - 1;
- num_matches_ = max_matches_;
- DCHECK_LE(2, registers_per_match_); // Each match has at least one capture.
- DCHECK_GE(register_array_size_, registers_per_match_);
- int32_t* last_match =
- &register_array_[current_match_index_ * registers_per_match_];
- last_match[0] = -1;
- last_match[1] = 0;
-}
-
-int RegExpImpl::GlobalCache::AdvanceZeroLength(int last_index) {
- if (IsUnicode(regexp_->GetFlags()) && last_index + 1 < subject_->length() &&
- unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
- unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
- // Advance over the surrogate pair.
- return last_index + 2;
- }
- return last_index + 1;
-}
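
A standalone version of the same advance, assuming a std::u16string subject and
an explicit unicode flag (illustrative only):

#include <string>

// Advances past an empty match: one code unit normally, two when the /u flag
// is set and the current position sits on a surrogate pair.
int AdvanceZeroLengthSketch(const std::u16string& subject, int last_index,
                            bool unicode) {
  auto is_lead = [](char16_t c) { return c >= 0xD800 && c <= 0xDBFF; };
  auto is_trail = [](char16_t c) { return c >= 0xDC00 && c <= 0xDFFF; };
  if (unicode && last_index + 1 < static_cast<int>(subject.size()) &&
      is_lead(subject[last_index]) && is_trail(subject[last_index + 1])) {
    return last_index + 2;
  }
  return last_index + 1;
}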
-
-// -------------------------------------------------------------------
-// Implementation of the Irregexp regular expression engine.
-//
-// The Irregexp regular expression engine is intended to be a complete
-// implementation of ECMAScript regular expressions. It generates either
-// bytecodes or native code.
-
-// The Irregexp regexp engine is structured in three steps.
-// 1) The parser generates an abstract syntax tree. See ast.cc.
-// 2) From the AST a node network is created. The nodes are all
-// subclasses of RegExpNode. The nodes represent states when
-// executing a regular expression. Several optimizations are
-// performed on the node network.
-// 3) From the nodes we generate either byte codes or native code
-// that can actually execute the regular expression (perform
-// the search). The code generation step is described in more
-// detail below.
-
-// Code generation.
-//
-// The nodes are divided into four main categories.
-// * Choice nodes
-// These represent places where the regular expression can
-// match in more than one way. For example on entry to an
-// alternation (foo|bar) or a repetition (*, +, ? or {}).
-// * Action nodes
-// These represent places where some action should be
-// performed. Examples include recording the current position
-// in the input string to a register (in order to implement
-//      captures) or other actions on registers, for example in order
-// to implement the counters needed for {} repetitions.
-// * Matching nodes
-//      These attempt to match some element of the input string.
-// Examples of elements include character classes, plain strings
-// or back references.
-// * End nodes
-// These are used to implement the actions required on finding
-// a successful match or failing to find a match.
-//
-// The code generated (whether as byte codes or native code) maintains
-// some state as it runs. This consists of the following elements:
-//
-// * The capture registers. Used for string captures.
-// * Other registers. Used for counters etc.
-// * The current position.
-// * The stack of backtracking information. Used when a matching node
-// fails to find a match and needs to try an alternative.
-//
-// Conceptual regular expression execution model:
-//
-// There is a simple conceptual model of regular expression execution
-// which will be presented first. The actual code generated is a more
-// efficient simulation of the simple conceptual model:
-//
-// * Choice nodes are implemented as follows:
-// For each choice except the last {
-// push current position
-// push backtrack code location
-// <generate code to test for choice>
-// backtrack code location:
-// pop current position
-// }
-// <generate code to test for last choice>
-//
-// * Action nodes are generated as follows:
-// <push affected registers on backtrack stack>
-// <generate code to perform action>
-// push backtrack code location
-// <generate code to test for following nodes>
-// backtrack code location:
-// <pop affected registers to restore their state>
-// <pop backtrack location from stack and go to it>
-//
-// * Matching nodes are generated as follows:
-// if input string matches at current position
-// update current position
-// <generate code to test for following nodes>
-// else
-// <pop backtrack location from stack and go to it>
-//
-// Thus it can be seen that the current position is saved and restored
-// by the choice nodes, whereas the registers are saved and restored
-// by the action nodes that manipulate them.
-//
-// The other interesting aspect of this model is that nodes are generated
-// at the point where they are needed by a recursive call to Emit(). If
-// the node has already been code generated then the Emit() call will
-// generate a jump to the previously generated code instead. In order to
-// limit recursion it is possible for the Emit() function to put the node
-// on a work list for later generation and instead generate a jump. The
-// destination of the jump is resolved later when the code is generated.
-//
-// Actual regular expression code generation.
-//
-// Code generation is actually more complicated than the above. In order
-// to improve the efficiency of the generated code some optimizations are
-// performed
-//
-// * Choice nodes have 1-character lookahead.
-// A choice node looks at the following character and eliminates some of
-// the choices immediately based on that character. This is not yet
-// implemented.
-// * Simple greedy loops store reduced backtracking information.
-// A quantifier like /.*foo/m will greedily match the whole input. It will
-// then need to backtrack to a point where it can match "foo". The naive
-// implementation of this would push each character position onto the
-// backtracking stack, then pop them off one by one. This would use space
-// proportional to the length of the input string. However since the "."
-// can only match in one way and always has a constant length (in this case
-// of 1) it suffices to store the current position on the top of the stack
-// once. Matching now becomes merely incrementing the current position and
-// backtracking becomes decrementing the current position and checking the
-// result against the stored current position. This is faster and saves
-// space.
-// * The current state is virtualized.
-// This is used to defer expensive operations until it is clear that they
-// are needed and to generate code for a node more than once, allowing
-// specialized and efficient versions of the code to be created. This is
-// explained in the section below.
-//
-// Execution state virtualization.
-//
-// Instead of emitting code, nodes that manipulate the state can record their
-// manipulation in an object called the Trace. The Trace object can record a
-// current position offset, an optional backtrack code location on the top of
-// the virtualized backtrack stack and some register changes. When a node is
-// to be emitted it can flush the Trace or update it. Flushing the Trace
-// will emit code to bring the actual state into line with the virtual state.
-// Avoiding flushing the state can postpone some work (e.g. updates of capture
-// registers). Postponing work can save time when executing the regular
-// expression, since a failure to match may mean that the work never has to
-// be done at all. In addition it is much faster to jump to a
-// known backtrack code location than it is to pop an unknown backtrack
-// location from the stack and jump there.
-//
-// The virtual state found in the Trace affects code generation. For example
-// the virtual state contains the difference between the actual current
-// position and the virtual current position, and matching code needs to use
-// this offset to attempt a match in the correct location of the input
-// string. Therefore code generated for a non-trivial trace is specialized
-// to that trace. The code generator therefore has the ability to generate
-// code for each node several times. In order to limit the size of the
-// generated code there is an arbitrary limit on how many specialized sets of
-// code may be generated for a given node. If the limit is reached, the
-// trace is flushed and a generic version of the code for a node is emitted.
-// This is subsequently used for that node. The code emitted for a non-generic
-// trace is not recorded in the node and so it cannot currently be reused in
-// the event that code generation is requested for an identical trace.
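
The Trace described above can be pictured as a small record of pending state
changes. A toy sketch of the flush idea, assuming callback hooks stand in for
the macro assembler (illustrative only, not the actual Trace interface):

#include <functional>
#include <map>

// Defers position and register updates, then emits them all at once when a
// node actually needs the real machine state.
class TraceSketch {
 public:
  void AdvancePosition(int by) { cp_offset_ += by; }           // deferred
  void AdvanceRegister(int reg, int by) { regs_[reg] += by; }  // deferred
  bool is_trivial() const { return cp_offset_ == 0 && regs_.empty(); }

  void Flush(const std::function<void(int)>& emit_advance_position,
             const std::function<void(int, int)>& emit_advance_register) {
    if (cp_offset_ != 0) emit_advance_position(cp_offset_);
    for (const auto& entry : regs_) {
      emit_advance_register(entry.first, entry.second);
    }
    cp_offset_ = 0;
    regs_.clear();
  }

 private:
  int cp_offset_ = 0;
  std::map<int, int> regs_;
};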
-
-
-void RegExpTree::AppendToText(RegExpText* text, Zone* zone) {
- UNREACHABLE();
-}
-
-
-void RegExpAtom::AppendToText(RegExpText* text, Zone* zone) {
- text->AddElement(TextElement::Atom(this), zone);
-}
-
-
-void RegExpCharacterClass::AppendToText(RegExpText* text, Zone* zone) {
- text->AddElement(TextElement::CharClass(this), zone);
-}
-
-
-void RegExpText::AppendToText(RegExpText* text, Zone* zone) {
- for (int i = 0; i < elements()->length(); i++)
- text->AddElement(elements()->at(i), zone);
-}
-
-
-TextElement TextElement::Atom(RegExpAtom* atom) {
- return TextElement(ATOM, atom);
-}
-
-
-TextElement TextElement::CharClass(RegExpCharacterClass* char_class) {
- return TextElement(CHAR_CLASS, char_class);
-}
-
-
-int TextElement::length() const {
- switch (text_type()) {
- case ATOM:
- return atom()->length();
-
- case CHAR_CLASS:
- return 1;
- }
- UNREACHABLE();
-}
-
-
-DispatchTable* ChoiceNode::GetTable(bool ignore_case) {
- if (table_ == nullptr) {
- table_ = new(zone()) DispatchTable(zone());
- DispatchTableConstructor cons(table_, ignore_case, zone());
- cons.BuildTable(this);
- }
- return table_;
-}
-
-
-class FrequencyCollator {
- public:
- FrequencyCollator() : total_samples_(0) {
- for (int i = 0; i < RegExpMacroAssembler::kTableSize; i++) {
- frequencies_[i] = CharacterFrequency(i);
- }
- }
-
- void CountCharacter(int character) {
- int index = (character & RegExpMacroAssembler::kTableMask);
- frequencies_[index].Increment();
- total_samples_++;
- }
-
- // Does not measure in percent, but rather per-128 (the table size from the
- // regexp macro assembler).
- int Frequency(int in_character) {
- DCHECK((in_character & RegExpMacroAssembler::kTableMask) == in_character);
-    if (total_samples_ < 1) return 1;  // Avoid division by zero.
- int freq_in_per128 =
- (frequencies_[in_character].counter() * 128) / total_samples_;
- return freq_in_per128;
- }
-
- private:
- class CharacterFrequency {
- public:
- CharacterFrequency() : counter_(0), character_(-1) { }
- explicit CharacterFrequency(int character)
- : counter_(0), character_(character) { }
-
- void Increment() { counter_++; }
- int counter() { return counter_; }
- int character() { return character_; }
-
- private:
- int counter_;
- int character_;
- };
-
-
- private:
- CharacterFrequency frequencies_[RegExpMacroAssembler::kTableSize];
- int total_samples_;
-};
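
A tiny standalone equivalent of this bookkeeping, assuming plain ints; the
per-128 scaling matches the macro assembler's table size (illustrative only):

#include <array>

class FrequencySketch {
 public:
  void CountCharacter(int character) {
    counts_[character & 127]++;
    total_samples_++;
  }
  // Scaled to 0..128 rather than a percentage, as in Frequency() above.
  int FrequencyPer128(int character) const {
    if (total_samples_ < 1) return 1;  // Avoid division by zero.
    return (counts_[character & 127] * 128) / total_samples_;
  }

 private:
  std::array<int, 128> counts_{};
  int total_samples_ = 0;
};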
-
-
-class RegExpCompiler {
- public:
- RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- bool is_one_byte);
-
- int AllocateRegister() {
- if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
- reg_exp_too_big_ = true;
- return next_register_;
- }
- return next_register_++;
- }
-
- // Lookarounds to match lone surrogates for unicode character class matches
- // are never nested. We can therefore reuse registers.
- int UnicodeLookaroundStackRegister() {
- if (unicode_lookaround_stack_register_ == kNoRegister) {
- unicode_lookaround_stack_register_ = AllocateRegister();
- }
- return unicode_lookaround_stack_register_;
- }
-
- int UnicodeLookaroundPositionRegister() {
- if (unicode_lookaround_position_register_ == kNoRegister) {
- unicode_lookaround_position_register_ = AllocateRegister();
- }
- return unicode_lookaround_position_register_;
- }
-
- RegExpEngine::CompilationResult Assemble(Isolate* isolate,
- RegExpMacroAssembler* assembler,
- RegExpNode* start, int capture_count,
- Handle<String> pattern);
-
- inline void AddWork(RegExpNode* node) {
- if (!node->on_work_list() && !node->label()->is_bound()) {
- node->set_on_work_list(true);
- work_list_->push_back(node);
- }
- }
-
- static const int kImplementationOffset = 0;
- static const int kNumberOfRegistersOffset = 0;
- static const int kCodeOffset = 1;
-
- RegExpMacroAssembler* macro_assembler() { return macro_assembler_; }
- EndNode* accept() { return accept_; }
-
- static const int kMaxRecursion = 100;
- inline int recursion_depth() { return recursion_depth_; }
- inline void IncrementRecursionDepth() { recursion_depth_++; }
- inline void DecrementRecursionDepth() { recursion_depth_--; }
-
- void SetRegExpTooBig() { reg_exp_too_big_ = true; }
-
- inline bool one_byte() { return one_byte_; }
- inline bool optimize() { return optimize_; }
- inline void set_optimize(bool value) { optimize_ = value; }
- inline bool limiting_recursion() { return limiting_recursion_; }
- inline void set_limiting_recursion(bool value) {
- limiting_recursion_ = value;
- }
- bool read_backward() { return read_backward_; }
- void set_read_backward(bool value) { read_backward_ = value; }
- FrequencyCollator* frequency_collator() { return &frequency_collator_; }
-
- int current_expansion_factor() { return current_expansion_factor_; }
- void set_current_expansion_factor(int value) {
- current_expansion_factor_ = value;
- }
-
- Isolate* isolate() const { return isolate_; }
- Zone* zone() const { return zone_; }
-
- static const int kNoRegister = -1;
-
- private:
- EndNode* accept_;
- int next_register_;
- int unicode_lookaround_stack_register_;
- int unicode_lookaround_position_register_;
- std::vector<RegExpNode*>* work_list_;
- int recursion_depth_;
- RegExpMacroAssembler* macro_assembler_;
- bool one_byte_;
- bool reg_exp_too_big_;
- bool limiting_recursion_;
- bool optimize_;
- bool read_backward_;
- int current_expansion_factor_;
- FrequencyCollator frequency_collator_;
- Isolate* isolate_;
- Zone* zone_;
-};
-
-
-class RecursionCheck {
- public:
- explicit RecursionCheck(RegExpCompiler* compiler) : compiler_(compiler) {
- compiler->IncrementRecursionDepth();
- }
- ~RecursionCheck() { compiler_->DecrementRecursionDepth(); }
- private:
- RegExpCompiler* compiler_;
-};
-
-
-static RegExpEngine::CompilationResult IrregexpRegExpTooBig(Isolate* isolate) {
- return RegExpEngine::CompilationResult(isolate, "RegExp too big");
-}
-
-
-// Attempts to compile the regexp using an Irregexp code generator. Returns
-// a fixed array or a null handle depending on whether it succeeded.
-RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- bool one_byte)
- : next_register_(2 * (capture_count + 1)),
- unicode_lookaround_stack_register_(kNoRegister),
- unicode_lookaround_position_register_(kNoRegister),
- work_list_(nullptr),
- recursion_depth_(0),
- one_byte_(one_byte),
- reg_exp_too_big_(false),
- limiting_recursion_(false),
- optimize_(FLAG_regexp_optimization),
- read_backward_(false),
- current_expansion_factor_(1),
- frequency_collator_(),
- isolate_(isolate),
- zone_(zone) {
- accept_ = new(zone) EndNode(EndNode::ACCEPT, zone);
- DCHECK_GE(RegExpMacroAssembler::kMaxRegister, next_register_ - 1);
-}
-
-RegExpEngine::CompilationResult RegExpCompiler::Assemble(
- Isolate* isolate, RegExpMacroAssembler* macro_assembler, RegExpNode* start,
- int capture_count, Handle<String> pattern) {
-#ifdef DEBUG
- if (FLAG_trace_regexp_assembler)
- macro_assembler_ = new RegExpMacroAssemblerTracer(isolate, macro_assembler);
- else
-#endif
- macro_assembler_ = macro_assembler;
-
- std::vector<RegExpNode*> work_list;
- work_list_ = &work_list;
- Label fail;
- macro_assembler_->PushBacktrack(&fail);
- Trace new_trace;
- start->Emit(this, &new_trace);
- macro_assembler_->Bind(&fail);
- macro_assembler_->Fail();
- while (!work_list.empty()) {
- RegExpNode* node = work_list.back();
- work_list.pop_back();
- node->set_on_work_list(false);
- if (!node->label()->is_bound()) node->Emit(this, &new_trace);
- }
- if (reg_exp_too_big_) {
- macro_assembler_->AbortedCodeGeneration();
- return IrregexpRegExpTooBig(isolate_);
- }
-
- Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
- isolate->IncreaseTotalRegexpCodeGenerated(code->Size());
- work_list_ = nullptr;
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code && !FLAG_regexp_interpret_all) {
- CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
- OFStream os(trace_scope.file());
- Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
- }
-#endif
-#ifdef DEBUG
- if (FLAG_trace_regexp_assembler) {
- delete macro_assembler_;
- }
-#endif
- return RegExpEngine::CompilationResult(*code, next_register_);
-}
-
-
-bool Trace::DeferredAction::Mentions(int that) {
- if (action_type() == ActionNode::CLEAR_CAPTURES) {
- Interval range = static_cast<DeferredClearCaptures*>(this)->range();
- return range.Contains(that);
- } else {
- return reg() == that;
- }
-}
-
-
-bool Trace::mentions_reg(int reg) {
- for (DeferredAction* action = actions_; action != nullptr;
- action = action->next()) {
- if (action->Mentions(reg))
- return true;
- }
- return false;
-}
-
-
-bool Trace::GetStoredPosition(int reg, int* cp_offset) {
- DCHECK_EQ(0, *cp_offset);
- for (DeferredAction* action = actions_; action != nullptr;
- action = action->next()) {
- if (action->Mentions(reg)) {
- if (action->action_type() == ActionNode::STORE_POSITION) {
- *cp_offset = static_cast<DeferredCapture*>(action)->cp_offset();
- return true;
- } else {
- return false;
- }
- }
- }
- return false;
-}
-
-
-int Trace::FindAffectedRegisters(OutSet* affected_registers,
- Zone* zone) {
- int max_register = RegExpCompiler::kNoRegister;
- for (DeferredAction* action = actions_; action != nullptr;
- action = action->next()) {
- if (action->action_type() == ActionNode::CLEAR_CAPTURES) {
- Interval range = static_cast<DeferredClearCaptures*>(action)->range();
- for (int i = range.from(); i <= range.to(); i++)
- affected_registers->Set(i, zone);
- if (range.to() > max_register) max_register = range.to();
- } else {
- affected_registers->Set(action->reg(), zone);
- if (action->reg() > max_register) max_register = action->reg();
- }
- }
- return max_register;
-}
-
-
-void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler,
- int max_register,
- const OutSet& registers_to_pop,
- const OutSet& registers_to_clear) {
- for (int reg = max_register; reg >= 0; reg--) {
- if (registers_to_pop.Get(reg)) {
- assembler->PopRegister(reg);
- } else if (registers_to_clear.Get(reg)) {
- int clear_to = reg;
- while (reg > 0 && registers_to_clear.Get(reg - 1)) {
- reg--;
- }
- assembler->ClearRegisters(reg, clear_to);
- }
- }
-}
-
-
-void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
- int max_register,
- const OutSet& affected_registers,
- OutSet* registers_to_pop,
- OutSet* registers_to_clear,
- Zone* zone) {
- // The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
- const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
-
- // Count pushes performed to force a stack limit check occasionally.
- int pushes = 0;
-
- for (int reg = 0; reg <= max_register; reg++) {
- if (!affected_registers.Get(reg)) {
- continue;
- }
-
- // The chronologically first deferred action in the trace
- // is used to infer the action needed to restore a register
- // to its previous state (or not, if it's safe to ignore it).
- enum DeferredActionUndoType { IGNORE, RESTORE, CLEAR };
- DeferredActionUndoType undo_action = IGNORE;
-
- int value = 0;
- bool absolute = false;
- bool clear = false;
- static const int kNoStore = kMinInt;
- int store_position = kNoStore;
- // This is a little tricky because we are scanning the actions in reverse
- // historical order (newest first).
- for (DeferredAction* action = actions_; action != nullptr;
- action = action->next()) {
- if (action->Mentions(reg)) {
- switch (action->action_type()) {
- case ActionNode::SET_REGISTER: {
- Trace::DeferredSetRegister* psr =
- static_cast<Trace::DeferredSetRegister*>(action);
- if (!absolute) {
- value += psr->value();
- absolute = true;
- }
- // SET_REGISTER is currently only used for newly introduced loop
- // counters. They can have a significant previous value if they
- // occur in a loop. TODO(lrn): Propagate this information, so
- // we can set undo_action to IGNORE if we know there is no value to
- // restore.
- undo_action = RESTORE;
- DCHECK_EQ(store_position, kNoStore);
- DCHECK(!clear);
- break;
- }
- case ActionNode::INCREMENT_REGISTER:
- if (!absolute) {
- value++;
- }
- DCHECK_EQ(store_position, kNoStore);
- DCHECK(!clear);
- undo_action = RESTORE;
- break;
- case ActionNode::STORE_POSITION: {
- Trace::DeferredCapture* pc =
- static_cast<Trace::DeferredCapture*>(action);
- if (!clear && store_position == kNoStore) {
- store_position = pc->cp_offset();
- }
-
- // For captures we know that stores and clears alternate.
-            // Other registers are never cleared, and if they occur
- // inside a loop, they might be assigned more than once.
- if (reg <= 1) {
-              // Registers zero and one, aka "capture zero", are
- // always set correctly if we succeed. There is no
- // need to undo a setting on backtrack, because we
- // will set it again or fail.
- undo_action = IGNORE;
- } else {
- undo_action = pc->is_capture() ? CLEAR : RESTORE;
- }
- DCHECK(!absolute);
- DCHECK_EQ(value, 0);
- break;
- }
- case ActionNode::CLEAR_CAPTURES: {
- // Since we're scanning in reverse order, if we've already
- // set the position we have to ignore historically earlier
- // clearing operations.
- if (store_position == kNoStore) {
- clear = true;
- }
- undo_action = RESTORE;
- DCHECK(!absolute);
- DCHECK_EQ(value, 0);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
- }
- // Prepare for the undo-action (e.g., push if it's going to be popped).
- if (undo_action == RESTORE) {
- pushes++;
- RegExpMacroAssembler::StackCheckFlag stack_check =
- RegExpMacroAssembler::kNoStackLimitCheck;
- if (pushes == push_limit) {
- stack_check = RegExpMacroAssembler::kCheckStackLimit;
- pushes = 0;
- }
-
- assembler->PushRegister(reg, stack_check);
- registers_to_pop->Set(reg, zone);
- } else if (undo_action == CLEAR) {
- registers_to_clear->Set(reg, zone);
- }
- // Perform the chronologically last action (or accumulated increment)
- // for the register.
- if (store_position != kNoStore) {
- assembler->WriteCurrentPositionToRegister(reg, store_position);
- } else if (clear) {
- assembler->ClearRegisters(reg, reg);
- } else if (absolute) {
- assembler->SetRegister(reg, value);
- } else if (value != 0) {
- assembler->AdvanceRegister(reg, value);
- }
- }
-}
-
-
-// This is called as we come into a loop choice node and some other tricky
-// nodes. It normalizes the state of the code generator to ensure we can
-// generate generic code.
-void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- DCHECK(!is_trivial());
-
- if (actions_ == nullptr && backtrack() == nullptr) {
- // Here we just have some deferred cp advances to fix and we are back to
- // a normal situation. We may also have to forget some information gained
- // through a quick check that was already performed.
- if (cp_offset_ != 0) assembler->AdvanceCurrentPosition(cp_offset_);
- // Create a new trivial state and generate the node with that.
- Trace new_state;
- successor->Emit(compiler, &new_state);
- return;
- }
-
- // Generate deferred actions here along with code to undo them again.
- OutSet affected_registers;
-
- if (backtrack() != nullptr) {
- // Here we have a concrete backtrack location. These are set up by choice
- // nodes and so they indicate that we have a deferred save of the current
- // position which we may need to emit here.
- assembler->PushCurrentPosition();
- }
-
- int max_register = FindAffectedRegisters(&affected_registers,
- compiler->zone());
- OutSet registers_to_pop;
- OutSet registers_to_clear;
- PerformDeferredActions(assembler,
- max_register,
- affected_registers,
- &registers_to_pop,
- &registers_to_clear,
- compiler->zone());
- if (cp_offset_ != 0) {
- assembler->AdvanceCurrentPosition(cp_offset_);
- }
-
- // Create a new trivial state and generate the node with that.
- Label undo;
- assembler->PushBacktrack(&undo);
- if (successor->KeepRecursing(compiler)) {
- Trace new_state;
- successor->Emit(compiler, &new_state);
- } else {
- compiler->AddWork(successor);
- assembler->GoTo(successor->label());
- }
-
- // On backtrack we need to restore state.
- assembler->Bind(&undo);
- RestoreAffectedRegisters(assembler,
- max_register,
- registers_to_pop,
- registers_to_clear);
- if (backtrack() == nullptr) {
- assembler->Backtrack();
- } else {
- assembler->PopCurrentPosition();
- assembler->GoTo(backtrack());
- }
-}
-
-
-void NegativeSubmatchSuccess::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- // Omit flushing the trace. We discard the entire stack frame anyway.
-
- if (!label()->is_bound()) {
- // We are completely independent of the trace, since we ignore it,
- // so this code can be used as the generic version.
- assembler->Bind(label());
- }
-
- // Throw away everything on the backtrack stack since the start
- // of the negative submatch and restore the character position.
- assembler->ReadCurrentPositionFromRegister(current_position_register_);
- assembler->ReadStackPointerFromRegister(stack_pointer_register_);
- if (clear_capture_count_ > 0) {
- // Clear any captures that might have been performed during the success
- // of the body of the negative look-ahead.
- int clear_capture_end = clear_capture_start_ + clear_capture_count_ - 1;
- assembler->ClearRegisters(clear_capture_start_, clear_capture_end);
- }
- // Now that we have unwound the stack we find at the top of the stack the
- // backtrack that the BeginSubmatch node got.
- assembler->Backtrack();
-}
-
-
-void EndNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- if (!label()->is_bound()) {
- assembler->Bind(label());
- }
- switch (action_) {
- case ACCEPT:
- assembler->Succeed();
- return;
- case BACKTRACK:
- assembler->GoTo(trace->backtrack());
- return;
- case NEGATIVE_SUBMATCH_SUCCESS:
- // This case is handled in a different virtual method.
- UNREACHABLE();
- }
- UNIMPLEMENTED();
-}
-
-
-void GuardedAlternative::AddGuard(Guard* guard, Zone* zone) {
- if (guards_ == nullptr) guards_ = new (zone) ZoneList<Guard*>(1, zone);
- guards_->Add(guard, zone);
-}
-
-
-ActionNode* ActionNode::SetRegister(int reg,
- int val,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(SET_REGISTER, on_success);
- result->data_.u_store_register.reg = reg;
- result->data_.u_store_register.value = val;
- return result;
-}
-
-
-ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(INCREMENT_REGISTER, on_success);
- result->data_.u_increment_register.reg = reg;
- return result;
-}
-
-
-ActionNode* ActionNode::StorePosition(int reg,
- bool is_capture,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(STORE_POSITION, on_success);
- result->data_.u_position_register.reg = reg;
- result->data_.u_position_register.is_capture = is_capture;
- return result;
-}
-
-
-ActionNode* ActionNode::ClearCaptures(Interval range,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(CLEAR_CAPTURES, on_success);
- result->data_.u_clear_captures.range_from = range.from();
- result->data_.u_clear_captures.range_to = range.to();
- return result;
-}
-
-
-ActionNode* ActionNode::BeginSubmatch(int stack_reg,
- int position_reg,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(BEGIN_SUBMATCH, on_success);
- result->data_.u_submatch.stack_pointer_register = stack_reg;
- result->data_.u_submatch.current_position_register = position_reg;
- return result;
-}
-
-
-ActionNode* ActionNode::PositiveSubmatchSuccess(int stack_reg,
- int position_reg,
- int clear_register_count,
- int clear_register_from,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
- result->data_.u_submatch.stack_pointer_register = stack_reg;
- result->data_.u_submatch.current_position_register = position_reg;
- result->data_.u_submatch.clear_register_count = clear_register_count;
- result->data_.u_submatch.clear_register_from = clear_register_from;
- return result;
-}
-
-
-ActionNode* ActionNode::EmptyMatchCheck(int start_register,
- int repetition_register,
- int repetition_limit,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(EMPTY_MATCH_CHECK, on_success);
- result->data_.u_empty_match_check.start_register = start_register;
- result->data_.u_empty_match_check.repetition_register = repetition_register;
- result->data_.u_empty_match_check.repetition_limit = repetition_limit;
- return result;
-}
-
-
-#define DEFINE_ACCEPT(Type) \
- void Type##Node::Accept(NodeVisitor* visitor) { \
- visitor->Visit##Type(this); \
- }
-FOR_EACH_NODE_TYPE(DEFINE_ACCEPT)
-#undef DEFINE_ACCEPT
-
-
-void LoopChoiceNode::Accept(NodeVisitor* visitor) {
- visitor->VisitLoopChoice(this);
-}
-
-
-// -------------------------------------------------------------------
-// Emit code.
-
-
-void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
- Guard* guard,
- Trace* trace) {
- switch (guard->op()) {
- case Guard::LT:
- DCHECK(!trace->mentions_reg(guard->reg()));
- macro_assembler->IfRegisterGE(guard->reg(),
- guard->value(),
- trace->backtrack());
- break;
- case Guard::GEQ:
- DCHECK(!trace->mentions_reg(guard->reg()));
- macro_assembler->IfRegisterLT(guard->reg(),
- guard->value(),
- trace->backtrack());
- break;
- }
-}
-
-
-// Returns the number of characters in the equivalence class, omitting those
-// that cannot occur in the source string because it is Latin1.
-static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
- bool one_byte_subject,
- unibrow::uchar* letters,
- int letter_length) {
-#ifdef V8_INTL_SUPPORT
- icu::UnicodeSet set;
- set.add(character);
- set = set.closeOver(USET_CASE_INSENSITIVE);
- int32_t range_count = set.getRangeCount();
- int items = 0;
- for (int32_t i = 0; i < range_count; i++) {
- UChar32 start = set.getRangeStart(i);
- UChar32 end = set.getRangeEnd(i);
- CHECK(end - start + items <= letter_length);
- while (start <= end) {
- if (one_byte_subject && start > String::kMaxOneByteCharCode) break;
- letters[items++] = (unibrow::uchar)(start);
- start++;
- }
- }
- return items;
-#else
- int length =
- isolate->jsregexp_uncanonicalize()->get(character, '\0', letters);
- // Unibrow returns 0 or 1 for characters where case independence is
- // trivial.
- if (length == 0) {
- letters[0] = character;
- length = 1;
- }
-
- if (one_byte_subject) {
- int new_length = 0;
- for (int i = 0; i < length; i++) {
- if (letters[i] <= String::kMaxOneByteCharCode) {
- letters[new_length++] = letters[i];
- }
- }
- length = new_length;
- }
-
- return length;
-#endif // V8_INTL_SUPPORT
-}
-
-static inline bool EmitSimpleCharacter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- bool bound_checked = false;
- if (!preloaded) {
- assembler->LoadCurrentCharacter(
- cp_offset,
- on_failure,
- check);
- bound_checked = true;
- }
- assembler->CheckNotCharacter(c, on_failure);
- return bound_checked;
-}
-
-
-// Only emits non-letters (things that don't have case). Only used for case
-// independent matches.
-static inline bool EmitAtomNonLetter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- bool one_byte = compiler->one_byte();
- unibrow::uchar chars[4];
- int length = GetCaseIndependentLetters(isolate, c, one_byte, chars, 4);
- if (length < 1) {
-    // This can't match. Must be a one-byte subject and a non-one-byte
- // character. We do not need to do anything since the one-byte pass
- // already handled this.
- return false; // Bounds not checked.
- }
- bool checked = false;
- // We handle the length > 1 case in a later pass.
- if (length == 1) {
- if (one_byte && c > String::kMaxOneByteCharCodeU) {
- // Can't match - see above.
- return false; // Bounds not checked.
- }
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
- checked = check;
- }
- macro_assembler->CheckNotCharacter(c, on_failure);
- }
- return checked;
-}
-
-
-static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
- bool one_byte, uc16 c1, uc16 c2,
- Label* on_failure) {
- uc16 char_mask;
- if (one_byte) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- uc16 exor = c1 ^ c2;
- // Check whether exor has only one bit set.
- if (((exor - 1) & exor) == 0) {
- // If c1 and c2 differ only by one bit.
- // Ecma262UnCanonicalize always gives the highest number last.
- DCHECK(c2 > c1);
- uc16 mask = char_mask ^ exor;
- macro_assembler->CheckNotCharacterAfterAnd(c1, mask, on_failure);
- return true;
- }
- DCHECK(c2 > c1);
- uc16 diff = c2 - c1;
- if (((diff - 1) & diff) == 0 && c1 >= diff) {
- // If the characters differ by 2^n but don't differ by one bit then
- // subtract the difference from the found character, then do the or
- // trick. We avoid the theoretical case where negative numbers are
- // involved in order to simplify code generation.
- uc16 mask = char_mask ^ diff;
- macro_assembler->CheckNotCharacterAfterMinusAnd(c1 - diff,
- diff,
- mask,
- on_failure);
- return true;
- }
- return false;
-}
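
The first branch above is the classic case-folding trick: 'A' (0x41) and 'a'
(0x61) differ only in bit 0x20, so masking that bit out lets one comparison
accept both. A small standalone check of the idea, assuming 16-bit code units
(illustrative only):

#include <cassert>
#include <cstdint>

// True when c equals either c1 or c2, where c1 and c2 differ in exactly one
// bit; mirrors the CheckNotCharacterAfterAnd(c1, char_mask ^ exor, ...) path.
bool MatchesEitherSketch(uint16_t c, uint16_t c1, uint16_t c2) {
  const uint16_t kCharMask = 0xFFFF;  // String::kMaxUtf16CodeUnit
  uint16_t exor = c1 ^ c2;
  assert(((exor - 1) & exor) == 0);   // Exactly one differing bit.
  uint16_t mask = kCharMask ^ exor;   // Clear that bit everywhere.
  return (c & mask) == (c1 & mask);
}

MatchesEitherSketch('a', 'A', 'a') and MatchesEitherSketch('A', 'A', 'a') are
both true; any other character fails the masked comparison.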
-
-using EmitCharacterFunction = bool(Isolate* isolate, RegExpCompiler* compiler,
- uc16 c, Label* on_failure, int cp_offset,
- bool check, bool preloaded);
-
-// Only emits letters (things that have case). Only used for case independent
-// matches.
-static inline bool EmitAtomLetter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- bool one_byte = compiler->one_byte();
- unibrow::uchar chars[4];
- int length = GetCaseIndependentLetters(isolate, c, one_byte, chars, 4);
- if (length <= 1) return false;
- // We may not need to check against the end of the input string
- // if this character lies before a character that matched.
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
- }
- Label ok;
- switch (length) {
- case 2: {
-      if (!ShortCutEmitCharacterPair(macro_assembler, one_byte, chars[0],
-                                     chars[1], on_failure)) {
-        macro_assembler->CheckCharacter(chars[0], &ok);
-        macro_assembler->CheckNotCharacter(chars[1], on_failure);
-        macro_assembler->Bind(&ok);
-      }
- break;
- }
- case 4:
- macro_assembler->CheckCharacter(chars[3], &ok);
- V8_FALLTHROUGH;
- case 3:
- macro_assembler->CheckCharacter(chars[0], &ok);
- macro_assembler->CheckCharacter(chars[1], &ok);
- macro_assembler->CheckNotCharacter(chars[2], on_failure);
- macro_assembler->Bind(&ok);
- break;
- default:
- UNREACHABLE();
- }
- return true;
-}
-
-
-static void EmitBoundaryTest(RegExpMacroAssembler* masm,
- int border,
- Label* fall_through,
- Label* above_or_equal,
- Label* below) {
- if (below != fall_through) {
- masm->CheckCharacterLT(border, below);
- if (above_or_equal != fall_through) masm->GoTo(above_or_equal);
- } else {
- masm->CheckCharacterGT(border - 1, above_or_equal);
- }
-}
-
-
-static void EmitDoubleBoundaryTest(RegExpMacroAssembler* masm,
- int first,
- int last,
- Label* fall_through,
- Label* in_range,
- Label* out_of_range) {
- if (in_range == fall_through) {
- if (first == last) {
- masm->CheckNotCharacter(first, out_of_range);
- } else {
- masm->CheckCharacterNotInRange(first, last, out_of_range);
- }
- } else {
- if (first == last) {
- masm->CheckCharacter(first, in_range);
- } else {
- masm->CheckCharacterInRange(first, last, in_range);
- }
- if (out_of_range != fall_through) masm->GoTo(out_of_range);
- }
-}
-
-
-// even_label is for ranges[i] to ranges[i + 1] where i - start_index is even.
-// odd_label is for ranges[i] to ranges[i + 1] where i - start_index is odd.
-static void EmitUseLookupTable(
- RegExpMacroAssembler* masm,
- ZoneList<int>* ranges,
- int start_index,
- int end_index,
- int min_char,
- Label* fall_through,
- Label* even_label,
- Label* odd_label) {
- static const int kSize = RegExpMacroAssembler::kTableSize;
- static const int kMask = RegExpMacroAssembler::kTableMask;
-
- int base = (min_char & ~kMask);
- USE(base);
-
- // Assert that everything is on one kTableSize page.
- for (int i = start_index; i <= end_index; i++) {
- DCHECK_EQ(ranges->at(i) & ~kMask, base);
- }
- DCHECK(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base);
-
- char templ[kSize];
- Label* on_bit_set;
- Label* on_bit_clear;
- int bit;
- if (even_label == fall_through) {
- on_bit_set = odd_label;
- on_bit_clear = even_label;
- bit = 1;
- } else {
- on_bit_set = even_label;
- on_bit_clear = odd_label;
- bit = 0;
- }
- for (int i = 0; i < (ranges->at(start_index) & kMask) && i < kSize; i++) {
- templ[i] = bit;
- }
- int j = 0;
- bit ^= 1;
- for (int i = start_index; i < end_index; i++) {
- for (j = (ranges->at(i) & kMask); j < (ranges->at(i + 1) & kMask); j++) {
- templ[j] = bit;
- }
- bit ^= 1;
- }
- for (int i = j; i < kSize; i++) {
- templ[i] = bit;
- }
- Factory* factory = masm->isolate()->factory();
- // TODO(erikcorry): Cache these.
- Handle<ByteArray> ba = factory->NewByteArray(kSize, AllocationType::kOld);
- for (int i = 0; i < kSize; i++) {
- ba->set(i, templ[i]);
- }
- masm->CheckBitInTable(ba, on_bit_set);
- if (on_bit_clear != fall_through) masm->GoTo(on_bit_clear);
-}
-
-
-static void CutOutRange(RegExpMacroAssembler* masm,
- ZoneList<int>* ranges,
- int start_index,
- int end_index,
- int cut_index,
- Label* even_label,
- Label* odd_label) {
- bool odd = (((cut_index - start_index) & 1) == 1);
- Label* in_range_label = odd ? odd_label : even_label;
- Label dummy;
- EmitDoubleBoundaryTest(masm,
- ranges->at(cut_index),
- ranges->at(cut_index + 1) - 1,
- &dummy,
- in_range_label,
- &dummy);
- DCHECK(!dummy.is_linked());
- // Cut out the single range by rewriting the array. This creates a new
- // range that is a merger of the two ranges on either side of the one we
- // are cutting out. The oddity of the labels is preserved.
- for (int j = cut_index; j > start_index; j--) {
- ranges->at(j) = ranges->at(j - 1);
- }
- for (int j = cut_index + 1; j < end_index; j++) {
- ranges->at(j) = ranges->at(j + 1);
- }
-}
-
-
-// Unicode case. Split the search space into kSize spaces that are handled
-// with recursion.
-static void SplitSearchSpace(ZoneList<int>* ranges,
- int start_index,
- int end_index,
- int* new_start_index,
- int* new_end_index,
- int* border) {
- static const int kSize = RegExpMacroAssembler::kTableSize;
- static const int kMask = RegExpMacroAssembler::kTableMask;
-
- int first = ranges->at(start_index);
- int last = ranges->at(end_index) - 1;
-
- *new_start_index = start_index;
- *border = (ranges->at(start_index) & ~kMask) + kSize;
- while (*new_start_index < end_index) {
- if (ranges->at(*new_start_index) > *border) break;
- (*new_start_index)++;
- }
- // new_start_index is the index of the first edge that is beyond the
- // current kSize space.
-
- // For very large search spaces we do a binary chop search of the non-Latin1
- // space instead of just going to the end of the current kSize space. The
- // heuristics are complicated a little by the fact that any 128-character
- // encoding space can be quickly tested with a table lookup, so we don't
- // wish to do binary chop search at a smaller granularity than that. A
- // 128-character space can take up a lot of space in the ranges array if,
- // for example, we only want to match every second character (e.g. the lower
- // case characters on some Unicode pages).
- int binary_chop_index = (end_index + start_index) / 2;
- // The first test ensures that we get to the code that handles the Latin1
- // range with a single not-taken branch, speeding up this important
- // character range (even non-Latin1 charset-based text has spaces and
- // punctuation).
- if (*border - 1 > String::kMaxOneByteCharCode && // Latin1 case.
- end_index - start_index > (*new_start_index - start_index) * 2 &&
- last - first > kSize * 2 && binary_chop_index > *new_start_index &&
- ranges->at(binary_chop_index) >= first + 2 * kSize) {
- int scan_forward_for_section_border = binary_chop_index;
- int new_border = (ranges->at(binary_chop_index) | kMask) + 1;
-
- while (scan_forward_for_section_border < end_index) {
- if (ranges->at(scan_forward_for_section_border) > new_border) {
- *new_start_index = scan_forward_for_section_border;
- *border = new_border;
- break;
- }
- scan_forward_for_section_border++;
- }
- }
-
- DCHECK(*new_start_index > start_index);
- *new_end_index = *new_start_index - 1;
- if (ranges->at(*new_end_index) == *border) {
- (*new_end_index)--;
- }
- if (*border >= ranges->at(end_index)) {
- *border = ranges->at(end_index);
- *new_start_index = end_index; // Won't be used.
- *new_end_index = end_index - 1;
- }
-}
-
-// Gets a series of segment boundaries representing a character class. If the
-// character is in the range between an even and an odd boundary (counting from
-// start_index) then go to even_label, otherwise go to odd_label. We already
-// know that the character is in the range of min_char to max_char inclusive.
-// Either label can be nullptr indicating backtracking. Either label can also
-// be equal to the fall_through label.
-static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
- int start_index, int end_index, uc32 min_char,
- uc32 max_char, Label* fall_through,
- Label* even_label, Label* odd_label) {
- DCHECK_LE(min_char, String::kMaxUtf16CodeUnit);
- DCHECK_LE(max_char, String::kMaxUtf16CodeUnit);
-
- int first = ranges->at(start_index);
- int last = ranges->at(end_index) - 1;
-
- DCHECK_LT(min_char, first);
-
- // Just need to test if the character is before or on-or-after
- // a particular character.
- if (start_index == end_index) {
- EmitBoundaryTest(masm, first, fall_through, even_label, odd_label);
- return;
- }
-
- // Another almost trivial case: There is one interval in the middle that is
- // different from the end intervals.
- if (start_index + 1 == end_index) {
- EmitDoubleBoundaryTest(
- masm, first, last, fall_through, even_label, odd_label);
- return;
- }
-
- // It's not worth using table lookup if there are very few intervals in the
- // character class.
- if (end_index - start_index <= 6) {
- // It is faster to test for individual characters, so we look for those
- // first, then try arbitrary ranges in the second round.
- static int kNoCutIndex = -1;
- int cut = kNoCutIndex;
- for (int i = start_index; i < end_index; i++) {
- if (ranges->at(i) == ranges->at(i + 1) - 1) {
- cut = i;
- break;
- }
- }
- if (cut == kNoCutIndex) cut = start_index;
- CutOutRange(
- masm, ranges, start_index, end_index, cut, even_label, odd_label);
- DCHECK_GE(end_index - start_index, 2);
- GenerateBranches(masm,
- ranges,
- start_index + 1,
- end_index - 1,
- min_char,
- max_char,
- fall_through,
- even_label,
- odd_label);
- return;
- }
-
- // If there are a lot of intervals in the regexp, then we will use tables to
- // determine whether the character is inside or outside the character class.
- static const int kBits = RegExpMacroAssembler::kTableSizeBits;
-
- if ((max_char >> kBits) == (min_char >> kBits)) {
- EmitUseLookupTable(masm,
- ranges,
- start_index,
- end_index,
- min_char,
- fall_through,
- even_label,
- odd_label);
- return;
- }
-
- if ((min_char >> kBits) != (first >> kBits)) {
- masm->CheckCharacterLT(first, odd_label);
- GenerateBranches(masm,
- ranges,
- start_index + 1,
- end_index,
- first,
- max_char,
- fall_through,
- odd_label,
- even_label);
- return;
- }
-
- int new_start_index = 0;
- int new_end_index = 0;
- int border = 0;
-
- SplitSearchSpace(ranges,
- start_index,
- end_index,
- &new_start_index,
- &new_end_index,
- &border);
-
- Label handle_rest;
- Label* above = &handle_rest;
- if (border == last + 1) {
- // We didn't find any section that started after the limit, so everything
- // above the border is one of the terminal labels.
- above = (end_index & 1) != (start_index & 1) ? odd_label : even_label;
- DCHECK(new_end_index == end_index - 1);
- }
-
- DCHECK_LE(start_index, new_end_index);
- DCHECK_LE(new_start_index, end_index);
- DCHECK_LT(start_index, new_start_index);
- DCHECK_LT(new_end_index, end_index);
- DCHECK(new_end_index + 1 == new_start_index ||
- (new_end_index + 2 == new_start_index &&
- border == ranges->at(new_end_index + 1)));
- DCHECK_LT(min_char, border - 1);
- DCHECK_LT(border, max_char);
- DCHECK_LT(ranges->at(new_end_index), border);
- DCHECK(border < ranges->at(new_start_index) ||
- (border == ranges->at(new_start_index) &&
- new_start_index == end_index &&
- new_end_index == end_index - 1 &&
- border == last + 1));
- DCHECK(new_start_index == 0 || border >= ranges->at(new_start_index - 1));
-
- masm->CheckCharacterGT(border - 1, above);
- Label dummy;
- GenerateBranches(masm,
- ranges,
- start_index,
- new_end_index,
- min_char,
- border - 1,
- &dummy,
- even_label,
- odd_label);
- if (handle_rest.is_linked()) {
- masm->Bind(&handle_rest);
- bool flip = (new_start_index & 1) != (start_index & 1);
- GenerateBranches(masm,
- ranges,
- new_start_index,
- end_index,
- border,
- max_char,
- &dummy,
- flip ? odd_label : even_label,
- flip ? even_label : odd_label);
- }
-}
-
-
-static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
- RegExpCharacterClass* cc, bool one_byte,
- Label* on_failure, int cp_offset, bool check_offset,
- bool preloaded, Zone* zone) {
- ZoneList<CharacterRange>* ranges = cc->ranges(zone);
- CharacterRange::Canonicalize(ranges);
-
- int max_char;
- if (one_byte) {
- max_char = String::kMaxOneByteCharCode;
- } else {
- max_char = String::kMaxUtf16CodeUnit;
- }
-
- int range_count = ranges->length();
-
- int last_valid_range = range_count - 1;
- while (last_valid_range >= 0) {
- CharacterRange& range = ranges->at(last_valid_range);
- if (range.from() <= max_char) {
- break;
- }
- last_valid_range--;
- }
-
- if (last_valid_range < 0) {
- if (!cc->is_negated()) {
- macro_assembler->GoTo(on_failure);
- }
- if (check_offset) {
- macro_assembler->CheckPosition(cp_offset, on_failure);
- }
- return;
- }
-
- if (last_valid_range == 0 &&
- ranges->at(0).IsEverything(max_char)) {
- if (cc->is_negated()) {
- macro_assembler->GoTo(on_failure);
- } else {
- // This is a common case hit by non-anchored expressions.
- if (check_offset) {
- macro_assembler->CheckPosition(cp_offset, on_failure);
- }
- }
- return;
- }
-
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
- }
-
- if (cc->is_standard(zone) &&
- macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
- on_failure)) {
- return;
- }
-
-
- // A new list with ascending entries. Each entry is a code unit
- // where there is a boundary between code units that are part of
- // the class and code units that are not. Normally we insert an
- // entry at zero which goes to the failure label, but if there
- // was already one there we fall through for success on that entry.
- // Subsequent entries have alternating meaning (success/failure).
- ZoneList<int>* range_boundaries =
- new(zone) ZoneList<int>(last_valid_range, zone);
-
- bool zeroth_entry_is_failure = !cc->is_negated();
-
- for (int i = 0; i <= last_valid_range; i++) {
- CharacterRange& range = ranges->at(i);
- if (range.from() == 0) {
- DCHECK_EQ(i, 0);
- zeroth_entry_is_failure = !zeroth_entry_is_failure;
- } else {
- range_boundaries->Add(range.from(), zone);
- }
- range_boundaries->Add(range.to() + 1, zone);
- }
- int end_index = range_boundaries->length() - 1;
- if (range_boundaries->at(end_index) > max_char) {
- end_index--;
- }
-
- Label fall_through;
- GenerateBranches(macro_assembler,
- range_boundaries,
- 0, // start_index.
- end_index,
- 0, // min_char.
- max_char,
- &fall_through,
- zeroth_entry_is_failure ? &fall_through : on_failure,
- zeroth_entry_is_failure ? on_failure : &fall_through);
- macro_assembler->Bind(&fall_through);
-}
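For illustration, a self-contained sketch (plain C++, hypothetical helper, not the emitted code) of how an alternating boundary list like range_boundaries encodes class membership: membership flips at each boundary, and what the region below the first boundary means depends on zeroth_entry_is_failure.

  #include <algorithm>
  #include <cassert>
  #include <vector>

  // boundaries is ascending; membership flips at every entry.
  bool InClass(const std::vector<int>& boundaries, bool zeroth_entry_is_failure,
               int c) {
    // Count how many boundaries are <= c; the parity picks the region.
    size_t flips = std::upper_bound(boundaries.begin(), boundaries.end(), c) -
                   boundaries.begin();
    bool in = !zeroth_entry_is_failure;  // region before the first boundary
    if (flips & 1) in = !in;
    return in;
  }

  int main() {
    // Character class [a-dx-z]: boundaries at 'a', 'd' + 1, 'x', 'z' + 1.
    std::vector<int> boundaries = {'a', 'd' + 1, 'x', 'z' + 1};
    assert(InClass(boundaries, /*zeroth_entry_is_failure=*/true, 'b'));
    assert(!InClass(boundaries, true, 'f'));
    assert(InClass(boundaries, true, 'y'));
    assert(!InClass(boundaries, true, '~'));
  }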
-
-RegExpNode::~RegExpNode() = default;
-
-RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
- Trace* trace) {
- // If we are generating a greedy loop then don't stop and don't reuse code.
- if (trace->stop_node() != nullptr) {
- return CONTINUE;
- }
-
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- if (trace->is_trivial()) {
- if (label_.is_bound() || on_work_list() || !KeepRecursing(compiler)) {
- // If a generic version is already scheduled to be generated or we have
- // recursed too deeply then just generate a jump to that code.
- macro_assembler->GoTo(&label_);
- // This will queue it up for generation of a generic version if it hasn't
- // already been queued.
- compiler->AddWork(this);
- return DONE;
- }
- // Generate generic version of the node and bind the label for later use.
- macro_assembler->Bind(&label_);
- return CONTINUE;
- }
-
- // We are being asked to make a non-generic version. Keep track of how many
- // non-generic versions we generate so as not to overdo it.
- trace_count_++;
- if (KeepRecursing(compiler) && compiler->optimize() &&
- trace_count_ < kMaxCopiesCodeGenerated) {
- return CONTINUE;
- }
-
- // If we get here code has been generated for this node too many times or
- // recursion is too deep. Time to switch to a generic version. The code for
- // generic versions above can handle deep recursion properly.
- bool was_limiting = compiler->limiting_recursion();
- compiler->set_limiting_recursion(true);
- trace->Flush(compiler, this);
- compiler->set_limiting_recursion(was_limiting);
- return DONE;
-}
-
-
-bool RegExpNode::KeepRecursing(RegExpCompiler* compiler) {
- return !compiler->limiting_recursion() &&
- compiler->recursion_depth() <= RegExpCompiler::kMaxRecursion;
-}
-
-
-int ActionNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (budget <= 0) return 0;
- if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
- return on_success()->EatsAtLeast(still_to_find,
- budget - 1,
- not_at_start);
-}
-
-
-void ActionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- if (action_type_ != POSITIVE_SUBMATCH_SUCCESS) {
- on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
- }
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-int AssertionNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (budget <= 0) return 0;
- // If we know we are not at the start and we are asked "how many characters
- // will you match if you succeed?" then we can answer anything since false
- // implies false. So let's just return the max answer (still_to_find) since
- // that won't prevent us from preloading a lot of characters for the other
- // branches in the node graph.
- if (assertion_type() == AT_START && not_at_start) return still_to_find;
- return on_success()->EatsAtLeast(still_to_find,
- budget - 1,
- not_at_start);
-}
-
-
-void AssertionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- // Match the behaviour of EatsAtLeast on this node.
- if (assertion_type() == AT_START && not_at_start) return;
- on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-int BackReferenceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (read_backward()) return 0;
- if (budget <= 0) return 0;
- return on_success()->EatsAtLeast(still_to_find,
- budget - 1,
- not_at_start);
-}
-
-
-int TextNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (read_backward()) return 0;
- int answer = Length();
- if (answer >= still_to_find) return answer;
- if (budget <= 0) return answer;
- // We are not at start after this node so we set the last argument to 'true'.
- return answer + on_success()->EatsAtLeast(still_to_find - answer,
- budget - 1,
- true);
-}
-
-
-int NegativeLookaroundChoiceNode::EatsAtLeast(int still_to_find, int budget,
- bool not_at_start) {
- if (budget <= 0) return 0;
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- return node->EatsAtLeast(still_to_find, budget - 1, not_at_start);
-}
-
-
-void NegativeLookaroundChoiceNode::GetQuickCheckDetails(
- QuickCheckDetails* details, RegExpCompiler* compiler, int filled_in,
- bool not_at_start) {
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- return node->GetQuickCheckDetails(details, compiler, filled_in, not_at_start);
-}
-
-
-int ChoiceNode::EatsAtLeastHelper(int still_to_find,
- int budget,
- RegExpNode* ignore_this_node,
- bool not_at_start) {
- if (budget <= 0) return 0;
- int min = 100;
- int choice_count = alternatives_->length();
- budget = (budget - 1) / choice_count;
- for (int i = 0; i < choice_count; i++) {
- RegExpNode* node = alternatives_->at(i).node();
- if (node == ignore_this_node) continue;
- int node_eats_at_least =
- node->EatsAtLeast(still_to_find, budget, not_at_start);
- if (node_eats_at_least < min) min = node_eats_at_least;
- if (min == 0) return 0;
- }
- return min;
-}
-
-
-int LoopChoiceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- return EatsAtLeastHelper(still_to_find,
- budget - 1,
- loop_node_,
- not_at_start);
-}
-
-
-int ChoiceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- return EatsAtLeastHelper(still_to_find, budget, nullptr, not_at_start);
-}
-
-
-// Takes the left-most 1-bit and smears it out, setting all bits to its right.
-static inline uint32_t SmearBitsRight(uint32_t v) {
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return v;
-}
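A worked example (plain C++, not the V8 API) of how SmearBitsRight feeds the quick-check mask/value computed below for a character range: only the bits above the highest differing bit are kept in the mask, which yields a conservative, possibly approximate test.

  #include <cassert>
  #include <cstdint>

  static uint32_t SmearBitsRight(uint32_t v) {
    v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
    return v;
  }

  int main() {
    // Quick-check mask/value for the range ['0', '9'] (0x30..0x39).
    uint32_t from = 0x30, to = 0x39;
    uint32_t differing = from ^ to;              // 0x09
    uint32_t mask = ~SmearBitsRight(differing);  // ...FFF0: keep common high bits
    uint32_t value = from & mask;                // 0x30
    // Every character in the range passes the check...
    for (uint32_t c = from; c <= to; c++) assert((c & mask) == value);
    // ...and so do a few false positives (0x3A..0x3F); a quick check only has
    // to be conservative, never exact.
    assert((0x3C & mask) == value);
    assert((0x29 & mask) != value);  // ')' is correctly rejected
  }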
-
-
-bool QuickCheckDetails::Rationalize(bool asc) {
- bool found_useful_op = false;
- uint32_t char_mask;
- if (asc) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- mask_ = 0;
- value_ = 0;
- int char_shift = 0;
- for (int i = 0; i < characters_; i++) {
- Position* pos = &positions_[i];
- if ((pos->mask & String::kMaxOneByteCharCode) != 0) {
- found_useful_op = true;
- }
- mask_ |= (pos->mask & char_mask) << char_shift;
- value_ |= (pos->value & char_mask) << char_shift;
- char_shift += asc ? 8 : 16;
- }
- return found_useful_op;
-}
-
-
-bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
- Trace* bounds_check_trace,
- Trace* trace,
- bool preload_has_checked_bounds,
- Label* on_possible_success,
- QuickCheckDetails* details,
- bool fall_through_on_failure) {
- if (details->characters() == 0) return false;
- GetQuickCheckDetails(
- details, compiler, 0, trace->at_start() == Trace::FALSE_VALUE);
- if (details->cannot_match()) return false;
- if (!details->Rationalize(compiler->one_byte())) return false;
- DCHECK(details->characters() == 1 ||
- compiler->macro_assembler()->CanReadUnaligned());
- uint32_t mask = details->mask();
- uint32_t value = details->value();
-
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- if (trace->characters_preloaded() != details->characters()) {
- DCHECK(trace->cp_offset() == bounds_check_trace->cp_offset());
- // We are attempting to preload the minimum number of characters
- // any choice would eat, so if the bounds check fails, then none of the
- // choices can succeed, so we can just immediately backtrack, rather
- // than go to the next choice.
- assembler->LoadCurrentCharacter(trace->cp_offset(),
- bounds_check_trace->backtrack(),
- !preload_has_checked_bounds,
- details->characters());
- }
-
-
- bool need_mask = true;
-
- if (details->characters() == 1) {
- // If the number of characters preloaded is 1 then we used a byte or 16 bit
- // load, so the value is already masked down.
- uint32_t char_mask;
- if (compiler->one_byte()) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- if ((mask & char_mask) == char_mask) need_mask = false;
- mask &= char_mask;
- } else {
- // For 2-character preloads in one-byte mode or 1-character preloads in
- // two-byte mode we also use a 16 bit load with zero extend.
- static const uint32_t kTwoByteMask = 0xFFFF;
- static const uint32_t kFourByteMask = 0xFFFFFFFF;
- if (details->characters() == 2 && compiler->one_byte()) {
- if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
- } else if (details->characters() == 1 && !compiler->one_byte()) {
- if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
- } else {
- if (mask == kFourByteMask) need_mask = false;
- }
- }
-
- if (fall_through_on_failure) {
- if (need_mask) {
- assembler->CheckCharacterAfterAnd(value, mask, on_possible_success);
- } else {
- assembler->CheckCharacter(value, on_possible_success);
- }
- } else {
- if (need_mask) {
- assembler->CheckNotCharacterAfterAnd(value, mask, trace->backtrack());
- } else {
- assembler->CheckNotCharacter(value, trace->backtrack());
- }
- }
- return true;
-}
-
-
-// Here is the meat of GetQuickCheckDetails (see also the comment on the
-// super-class in the .h file).
-//
-// We iterate along the text object, building up for each character a
-// mask and value that can be used to test for a quick failure to match.
-// The masks and values for the positions will be combined into a single
-// machine word for the current character width in order to be used in
-// generating a quick check.
-void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- // Do not collect any quick check details if the text node reads backward,
- // since it reads in the opposite direction from the one we use for quick checks.
- if (read_backward()) return;
- Isolate* isolate = compiler->macro_assembler()->isolate();
- DCHECK(characters_filled_in < details->characters());
- int characters = details->characters();
- int char_mask;
- if (compiler->one_byte()) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- for (int k = 0; k < elements()->length(); k++) {
- TextElement elm = elements()->at(k);
- if (elm.text_type() == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.atom()->data();
- for (int i = 0; i < characters && i < quarks.length(); i++) {
- QuickCheckDetails::Position* pos =
- details->positions(characters_filled_in);
- uc16 c = quarks[i];
- if (elm.atom()->ignore_case()) {
- unibrow::uchar chars[4];
- int length = GetCaseIndependentLetters(
- isolate, c, compiler->one_byte(), chars, 4);
- if (length == 0) {
- // This can happen because all case variants are non-Latin1, but we
- // know the input is Latin1.
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
- if (length == 1) {
- // This letter has no case equivalents, so it's nice and simple
- // and the mask-compare will determine definitely whether we have
- // a match at this character position.
- pos->mask = char_mask;
- pos->value = c;
- pos->determines_perfectly = true;
- } else {
- uint32_t common_bits = char_mask;
- uint32_t bits = chars[0];
- for (int j = 1; j < length; j++) {
- uint32_t differing_bits = ((chars[j] & common_bits) ^ bits);
- common_bits ^= differing_bits;
- bits &= common_bits;
- }
- // If length is 2 and common bits has only one zero in it then
- // our mask and compare instruction will determine definitely
- // whether we have a match at this character position. Otherwise
- // it can only be an approximate check.
- uint32_t one_zero = (common_bits | ~char_mask);
- if (length == 2 && ((~one_zero) & ((~one_zero) - 1)) == 0) {
- pos->determines_perfectly = true;
- }
- pos->mask = common_bits;
- pos->value = bits;
- }
- } else {
- // Don't ignore case. Nice simple case where the mask-compare will
- // determine definitely whether we have a match at this character
- // position.
- if (c > char_mask) {
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
- pos->mask = char_mask;
- pos->value = c;
- pos->determines_perfectly = true;
- }
- characters_filled_in++;
- DCHECK(characters_filled_in <= details->characters());
- if (characters_filled_in == details->characters()) {
- return;
- }
- }
- } else {
- QuickCheckDetails::Position* pos =
- details->positions(characters_filled_in);
- RegExpCharacterClass* tree = elm.char_class();
- ZoneList<CharacterRange>* ranges = tree->ranges(zone());
- DCHECK(!ranges->is_empty());
- if (tree->is_negated()) {
- // A quick check uses multi-character mask and compare. There is no
- // useful way to incorporate a negative char class into this scheme
- // so we just conservatively create a mask and value that will always
- // succeed.
- pos->mask = 0;
- pos->value = 0;
- } else {
- int first_range = 0;
- while (ranges->at(first_range).from() > char_mask) {
- first_range++;
- if (first_range == ranges->length()) {
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
- }
- CharacterRange range = ranges->at(first_range);
- uc16 from = range.from();
- uc16 to = range.to();
- if (to > char_mask) {
- to = char_mask;
- }
- uint32_t differing_bits = (from ^ to);
- // A mask and compare is only perfect if the differing bits form a
- // number like 00011111 with one single block of trailing 1s.
- if ((differing_bits & (differing_bits + 1)) == 0 &&
- from + differing_bits == to) {
- pos->determines_perfectly = true;
- }
- uint32_t common_bits = ~SmearBitsRight(differing_bits);
- uint32_t bits = (from & common_bits);
- for (int i = first_range + 1; i < ranges->length(); i++) {
- CharacterRange range = ranges->at(i);
- uc16 from = range.from();
- uc16 to = range.to();
- if (from > char_mask) continue;
- if (to > char_mask) to = char_mask;
- // Here we are combining more ranges into the mask and compare
- // value. With each new range the mask becomes more sparse and
- // so the chances of a false positive rise. A character class
- // with multiple ranges is assumed never to be equivalent to a
- // mask and compare operation.
- pos->determines_perfectly = false;
- uint32_t new_common_bits = (from ^ to);
- new_common_bits = ~SmearBitsRight(new_common_bits);
- common_bits &= new_common_bits;
- bits &= new_common_bits;
- uint32_t differing_bits = (from & common_bits) ^ bits;
- common_bits ^= differing_bits;
- bits &= common_bits;
- }
- pos->mask = common_bits;
- pos->value = bits;
- }
- characters_filled_in++;
- DCHECK(characters_filled_in <= details->characters());
- if (characters_filled_in == details->characters()) {
- return;
- }
- }
- }
- DCHECK(characters_filled_in != details->characters());
- if (!details->cannot_match()) {
- on_success()->GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- true);
- }
-}
-
-
-void QuickCheckDetails::Clear() {
- for (int i = 0; i < characters_; i++) {
- positions_[i].mask = 0;
- positions_[i].value = 0;
- positions_[i].determines_perfectly = false;
- }
- characters_ = 0;
-}
-
-
-void QuickCheckDetails::Advance(int by, bool one_byte) {
- if (by >= characters_ || by < 0) {
- DCHECK_IMPLIES(by < 0, characters_ == 0);
- Clear();
- return;
- }
- DCHECK_LE(characters_ - by, 4);
- DCHECK_LE(characters_, 4);
- for (int i = 0; i < characters_ - by; i++) {
- positions_[i] = positions_[by + i];
- }
- for (int i = characters_ - by; i < characters_; i++) {
- positions_[i].mask = 0;
- positions_[i].value = 0;
- positions_[i].determines_perfectly = false;
- }
- characters_ -= by;
- // We could change mask_ and value_ here but we would never advance unless
- // they had already been used in a check and they won't be used again because
- // it would gain us nothing. So there's no point.
-}
-
-
-void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) {
- DCHECK(characters_ == other->characters_);
- if (other->cannot_match_) {
- return;
- }
- if (cannot_match_) {
- *this = *other;
- return;
- }
- for (int i = from_index; i < characters_; i++) {
- QuickCheckDetails::Position* pos = positions(i);
- QuickCheckDetails::Position* other_pos = other->positions(i);
- if (pos->mask != other_pos->mask ||
- pos->value != other_pos->value ||
- !other_pos->determines_perfectly) {
- // Our mask-compare operation will be approximate unless we have the
- // exact same operation on both sides of the alternation.
- pos->determines_perfectly = false;
- }
- pos->mask &= other_pos->mask;
- pos->value &= pos->mask;
- other_pos->value &= pos->mask;
- uc16 differing_bits = (pos->value ^ other_pos->value);
- pos->mask &= ~differing_bits;
- pos->value &= pos->mask;
- }
-}
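A small sketch of the merge rule above in isolation (plain C++, hypothetical struct, assuming one-byte characters): the merged quick check keeps only the bits on which both alternatives agree, so it accepts everything either branch could accept, at the cost of some false positives.

  #include <cassert>
  #include <cstdint>

  struct Pos { uint32_t mask; uint32_t value; };

  Pos MergePositions(Pos a, Pos b) {
    Pos out;
    out.mask = a.mask & b.mask;
    uint32_t va = a.value & out.mask;
    uint32_t vb = b.value & out.mask;
    out.mask &= ~(va ^ vb);  // drop bits on which the alternatives disagree
    out.value = va & out.mask;
    return out;
  }

  int main() {
    // Alternation /a|b/: 'a' = 0x61, 'b' = 0x62, each checked with a full mask.
    Pos m = MergePositions(Pos{0xFF, 'a'}, Pos{0xFF, 'b'});
    assert(m.mask == 0xFC && m.value == 0x60);
    assert(('a' & m.mask) == m.value);  // accepted
    assert(('b' & m.mask) == m.value);  // accepted
    assert(('d' & m.mask) != m.value);  // rejected
  }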
-
-
-class VisitMarker {
- public:
- explicit VisitMarker(NodeInfo* info) : info_(info) {
- DCHECK(!info->visited);
- info->visited = true;
- }
- ~VisitMarker() {
- info_->visited = false;
- }
- private:
- NodeInfo* info_;
-};
-
-RegExpNode* SeqRegExpNode::FilterOneByte(int depth) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- DCHECK(!info()->visited);
- VisitMarker marker(info());
- return FilterSuccessor(depth - 1);
-}
-
-RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
- RegExpNode* next = on_success_->FilterOneByte(depth - 1);
- if (next == nullptr) return set_replacement(nullptr);
- on_success_ = next;
- return set_replacement(this);
-}
-
-// We need to check for the following characters: 0x39C 0x3BC 0x178.
-static inline bool RangeContainsLatin1Equivalents(CharacterRange range) {
- // TODO(dcarney): this could be a lot more efficient.
- return range.Contains(0x039C) || range.Contains(0x03BC) ||
- range.Contains(0x0178);
-}
-
-
-static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
- for (int i = 0; i < ranges->length(); i++) {
- // TODO(dcarney): this could be a lot more efficient.
- if (RangeContainsLatin1Equivalents(ranges->at(i))) return true;
- }
- return false;
-}
-
-RegExpNode* TextNode::FilterOneByte(int depth) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- DCHECK(!info()->visited);
- VisitMarker marker(info());
- int element_count = elements()->length();
- for (int i = 0; i < element_count; i++) {
- TextElement elm = elements()->at(i);
- if (elm.text_type() == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.atom()->data();
- for (int j = 0; j < quarks.length(); j++) {
- uint16_t c = quarks[j];
- if (elm.atom()->ignore_case()) {
- c = unibrow::Latin1::TryConvertToLatin1(c);
- }
- if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
- // Replace quark in case we converted to Latin-1.
- uint16_t* writable_quarks = const_cast<uint16_t*>(quarks.begin());
- writable_quarks[j] = c;
- }
- } else {
- DCHECK(elm.text_type() == TextElement::CHAR_CLASS);
- RegExpCharacterClass* cc = elm.char_class();
- ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- CharacterRange::Canonicalize(ranges);
- // Now they are in order so we only need to look at the first.
- int range_count = ranges->length();
- if (cc->is_negated()) {
- if (range_count != 0 &&
- ranges->at(0).from() == 0 &&
- ranges->at(0).to() >= String::kMaxOneByteCharCode) {
- // This will be handled in a later filter.
- if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
- continue;
- return set_replacement(nullptr);
- }
- } else {
- if (range_count == 0 ||
- ranges->at(0).from() > String::kMaxOneByteCharCode) {
- // This will be handled in a later filter.
- if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
- continue;
- return set_replacement(nullptr);
- }
- }
- }
- }
- return FilterSuccessor(depth - 1);
-}
-
-RegExpNode* LoopChoiceNode::FilterOneByte(int depth) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- if (info()->visited) return this;
- {
- VisitMarker marker(info());
-
- RegExpNode* continue_replacement = continue_node_->FilterOneByte(depth - 1);
- // If we can't continue after the loop then there is no sense in doing the
- // loop.
- if (continue_replacement == nullptr) return set_replacement(nullptr);
- }
-
- return ChoiceNode::FilterOneByte(depth - 1);
-}
-
-RegExpNode* ChoiceNode::FilterOneByte(int depth) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- if (info()->visited) return this;
- VisitMarker marker(info());
- int choice_count = alternatives_->length();
-
- for (int i = 0; i < choice_count; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- if (alternative.guards() != nullptr &&
- alternative.guards()->length() != 0) {
- set_replacement(this);
- return this;
- }
- }
-
- int surviving = 0;
- RegExpNode* survivor = nullptr;
- for (int i = 0; i < choice_count; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- RegExpNode* replacement = alternative.node()->FilterOneByte(depth - 1);
- DCHECK(replacement != this); // No missing EMPTY_MATCH_CHECK.
- if (replacement != nullptr) {
- alternatives_->at(i).set_node(replacement);
- surviving++;
- survivor = replacement;
- }
- }
- if (surviving < 2) return set_replacement(survivor);
-
- set_replacement(this);
- if (surviving == choice_count) {
- return this;
- }
- // Only some of the nodes survived the filtering. We need to rebuild the
- // alternatives list.
- ZoneList<GuardedAlternative>* new_alternatives =
- new(zone()) ZoneList<GuardedAlternative>(surviving, zone());
- for (int i = 0; i < choice_count; i++) {
- RegExpNode* replacement =
- alternatives_->at(i).node()->FilterOneByte(depth - 1);
- if (replacement != nullptr) {
- alternatives_->at(i).set_node(replacement);
- new_alternatives->Add(alternatives_->at(i), zone());
- }
- }
- alternatives_ = new_alternatives;
- return this;
-}
-
-RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- if (info()->visited) return this;
- VisitMarker marker(info());
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- RegExpNode* replacement = node->FilterOneByte(depth - 1);
- if (replacement == nullptr) return set_replacement(nullptr);
- alternatives_->at(1).set_node(replacement);
-
- RegExpNode* neg_node = alternatives_->at(0).node();
- RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1);
- // If the negative lookahead is always going to fail then
- // we don't need to check it.
- if (neg_replacement == nullptr) return set_replacement(replacement);
- alternatives_->at(0).set_node(neg_replacement);
- return set_replacement(this);
-}
-
-
-void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- if (body_can_be_zero_length_ || info()->visited) return;
- VisitMarker marker(info());
- return ChoiceNode::GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- not_at_start);
-}
-
-
-void LoopChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- if (body_can_be_zero_length_ || budget <= 0) {
- bm->SetRest(offset);
- SaveBMInfo(bm, not_at_start, offset);
- return;
- }
- ChoiceNode::FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-void ChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- not_at_start = (not_at_start || not_at_start_);
- int choice_count = alternatives_->length();
- DCHECK_LT(0, choice_count);
- alternatives_->at(0).node()->GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- not_at_start);
- for (int i = 1; i < choice_count; i++) {
- QuickCheckDetails new_details(details->characters());
- RegExpNode* node = alternatives_->at(i).node();
- node->GetQuickCheckDetails(&new_details, compiler,
- characters_filled_in,
- not_at_start);
- // Here we merge the quick match details of the two branches.
- details->Merge(&new_details, characters_filled_in);
- }
-}
-
-
-// Check for [0-9A-Z_a-z].
-static void EmitWordCheck(RegExpMacroAssembler* assembler,
- Label* word,
- Label* non_word,
- bool fall_through_on_word) {
- if (assembler->CheckSpecialCharacterClass(
- fall_through_on_word ? 'w' : 'W',
- fall_through_on_word ? non_word : word)) {
- // Optimized implementation available.
- return;
- }
- assembler->CheckCharacterGT('z', non_word);
- assembler->CheckCharacterLT('0', non_word);
- assembler->CheckCharacterGT('a' - 1, word);
- assembler->CheckCharacterLT('9' + 1, word);
- assembler->CheckCharacterLT('A', non_word);
- assembler->CheckCharacterLT('Z' + 1, word);
- if (fall_through_on_word) {
- assembler->CheckNotCharacter('_', non_word);
- } else {
- assembler->CheckCharacter('_', word);
- }
-}
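As a plain C++ mirror of the branch ordering above (a sketch, not the emitted code), the \w membership test [0-9A-Z_a-z] resolves in the same sequence of comparisons:

  #include <cassert>

  bool IsWordChar(int c) {
    if (c > 'z') return false;  // above 'z': non-word
    if (c < '0') return false;  // below '0': non-word
    if (c >= 'a') return true;  // 'a'..'z'
    if (c <= '9') return true;  // '0'..'9'
    if (c < 'A') return false;  // ':'..'@'
    if (c <= 'Z') return true;  // 'A'..'Z'
    return c == '_';            // '['..'`': only '_' is a word character
  }

  int main() {
    assert(IsWordChar('_') && IsWordChar('m') && IsWordChar('7') && IsWordChar('Q'));
    assert(!IsWordChar(' ') && !IsWordChar('[') && !IsWordChar('{'));
  }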
-
-
-// Emit the code to check for a ^ in multiline mode (1-character lookbehind
-// that matches newline or the start of input).
-static void EmitHat(RegExpCompiler* compiler,
- RegExpNode* on_success,
- Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- // We will be loading the previous character into the current character
- // register.
- Trace new_trace(*trace);
- new_trace.InvalidateCurrentCharacter();
-
- Label ok;
- if (new_trace.cp_offset() == 0) {
- // The start of input counts as a newline in this context, so skip to
- // ok if we are at the start.
- assembler->CheckAtStart(&ok);
- }
- // We already checked that we are not at the start of input so it must be
- // OK to load the previous character.
- assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1,
- new_trace.backtrack(),
- false);
- if (!assembler->CheckSpecialCharacterClass('n',
- new_trace.backtrack())) {
- // Newline means \n, \r, 0x2028 or 0x2029.
- if (!compiler->one_byte()) {
- assembler->CheckCharacterAfterAnd(0x2028, 0xFFFE, &ok);
- }
- assembler->CheckCharacter('\n', &ok);
- assembler->CheckNotCharacter('\r', new_trace.backtrack());
- }
- assembler->Bind(&ok);
- on_success->Emit(compiler, &new_trace);
-}
-
-
-// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
-void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Isolate* isolate = assembler->isolate();
- Trace::TriBool next_is_word_character = Trace::UNKNOWN;
- bool not_at_start = (trace->at_start() == Trace::FALSE_VALUE);
- BoyerMooreLookahead* lookahead = bm_info(not_at_start);
- if (lookahead == nullptr) {
- int eats_at_least =
- Min(kMaxLookaheadForBoyerMoore, EatsAtLeast(kMaxLookaheadForBoyerMoore,
- kRecursionBudget,
- not_at_start));
- if (eats_at_least >= 1) {
- BoyerMooreLookahead* bm =
- new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
- FillInBMInfo(isolate, 0, kRecursionBudget, bm, not_at_start);
- if (bm->at(0)->is_non_word())
- next_is_word_character = Trace::FALSE_VALUE;
- if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE_VALUE;
- }
- } else {
- if (lookahead->at(0)->is_non_word())
- next_is_word_character = Trace::FALSE_VALUE;
- if (lookahead->at(0)->is_word())
- next_is_word_character = Trace::TRUE_VALUE;
- }
- bool at_boundary = (assertion_type_ == AssertionNode::AT_BOUNDARY);
- if (next_is_word_character == Trace::UNKNOWN) {
- Label before_non_word;
- Label before_word;
- if (trace->characters_preloaded() != 1) {
- assembler->LoadCurrentCharacter(trace->cp_offset(), &before_non_word);
- }
- // Fall through on non-word.
- EmitWordCheck(assembler, &before_word, &before_non_word, false);
- // Next character is not a word character.
- assembler->Bind(&before_non_word);
- Label ok;
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
- assembler->GoTo(&ok);
-
- assembler->Bind(&before_word);
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
- assembler->Bind(&ok);
- } else if (next_is_word_character == Trace::TRUE_VALUE) {
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
- } else {
- DCHECK(next_is_word_character == Trace::FALSE_VALUE);
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
- }
-}
-
-
-void AssertionNode::BacktrackIfPrevious(
- RegExpCompiler* compiler,
- Trace* trace,
- AssertionNode::IfPrevious backtrack_if_previous) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Trace new_trace(*trace);
- new_trace.InvalidateCurrentCharacter();
-
- Label fall_through, dummy;
-
- Label* non_word = backtrack_if_previous == kIsNonWord ?
- new_trace.backtrack() :
- &fall_through;
- Label* word = backtrack_if_previous == kIsNonWord ?
- &fall_through :
- new_trace.backtrack();
-
- if (new_trace.cp_offset() == 0) {
- // The start of input counts as a non-word character, so the question is
- // decided if we are at the start.
- assembler->CheckAtStart(non_word);
- }
- // We already checked that we are not at the start of input so it must be
- // OK to load the previous character.
- assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1, &dummy, false);
- EmitWordCheck(assembler, word, non_word, backtrack_if_previous == kIsNonWord);
-
- assembler->Bind(&fall_through);
- on_success()->Emit(compiler, &new_trace);
-}
-
-
-void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start) {
- if (assertion_type_ == AT_START && not_at_start) {
- details->set_cannot_match();
- return;
- }
- return on_success()->GetQuickCheckDetails(details,
- compiler,
- filled_in,
- not_at_start);
-}
-
-
-void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- switch (assertion_type_) {
- case AT_END: {
- Label ok;
- assembler->CheckPosition(trace->cp_offset(), &ok);
- assembler->GoTo(trace->backtrack());
- assembler->Bind(&ok);
- break;
- }
- case AT_START: {
- if (trace->at_start() == Trace::FALSE_VALUE) {
- assembler->GoTo(trace->backtrack());
- return;
- }
- if (trace->at_start() == Trace::UNKNOWN) {
- assembler->CheckNotAtStart(trace->cp_offset(), trace->backtrack());
- Trace at_start_trace = *trace;
- at_start_trace.set_at_start(Trace::TRUE_VALUE);
- on_success()->Emit(compiler, &at_start_trace);
- return;
- }
- }
- break;
- case AFTER_NEWLINE:
- EmitHat(compiler, on_success(), trace);
- return;
- case AT_BOUNDARY:
- case AT_NON_BOUNDARY: {
- EmitBoundaryCheck(compiler, trace);
- return;
- }
- }
- on_success()->Emit(compiler, trace);
-}
-
-
-static bool DeterminedAlready(QuickCheckDetails* quick_check, int offset) {
- if (quick_check == nullptr) return false;
- if (offset >= quick_check->characters()) return false;
- return quick_check->positions(offset)->determines_perfectly;
-}
-
-
-static void UpdateBoundsCheck(int index, int* checked_up_to) {
- if (index > *checked_up_to) {
- *checked_up_to = index;
- }
-}
-
-
-// We call this repeatedly to generate code for each pass over the text node.
-// The passes are in increasing order of difficulty because we hope one
-// of the first passes will fail in which case we are saved the work of the
- // later passes. For example, for the case independent regexp /%[asdfghjkl]a/
-// we will check the '%' in the first pass, the case independent 'a' in the
-// second pass and the character class in the last pass.
-//
-// The passes are done from right to left, so for example to test for /bar/
-// we will first test for an 'r' with offset 2, then an 'a' with offset 1
-// and then a 'b' with offset 0. This means we can avoid the end-of-input
-// bounds check most of the time. In the example we only need to check for
-// end-of-input when loading the putative 'r'.
-//
-// A slight complication involves the fact that the first character may already
-// be fetched into a register by the previous node. In this case we want to
-// do the test for that character first. We do this in separate passes. The
-// 'preloaded' argument indicates that we are doing such a 'pass'. If such a
-// pass has been performed then subsequent passes will have true in
-// first_element_checked to indicate that that character does not need to be
-// checked again.
-//
-// In addition to all this we are passed a Trace, which can
-// contain an AlternativeGeneration object. In this AlternativeGeneration
-// object we can see details of any quick check that was already passed in
-// order to get to the code we are now generating. The quick check can involve
-// loading characters, which means we do not need to recheck the bounds
-// up to the limit the quick check already checked. In addition the quick
-// check can have involved a mask and compare operation which may simplify
-// or obviate the need for further checks at some character positions.
-void TextNode::TextEmitPass(RegExpCompiler* compiler,
- TextEmitPassType pass,
- bool preloaded,
- Trace* trace,
- bool first_element_checked,
- int* checked_up_to) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Isolate* isolate = assembler->isolate();
- bool one_byte = compiler->one_byte();
- Label* backtrack = trace->backtrack();
- QuickCheckDetails* quick_check = trace->quick_check_performed();
- int element_count = elements()->length();
- int backward_offset = read_backward() ? -Length() : 0;
- for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
- TextElement elm = elements()->at(i);
- int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
- if (elm.text_type() == TextElement::ATOM) {
- if (SkipPass(pass, elm.atom()->ignore_case())) continue;
- Vector<const uc16> quarks = elm.atom()->data();
- for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
- if (first_element_checked && i == 0 && j == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
- EmitCharacterFunction* emit_function = nullptr;
- uc16 quark = quarks[j];
- if (elm.atom()->ignore_case()) {
- // Everywhere else we assume that a non-Latin-1 character cannot match
- // a Latin-1 character. Avoid the cases where this assumption is invalid
- // by using the Latin1 equivalent instead.
- quark = unibrow::Latin1::TryConvertToLatin1(quark);
- }
- switch (pass) {
- case NON_LATIN1_MATCH:
- DCHECK(one_byte);
- if (quark > String::kMaxOneByteCharCode) {
- assembler->GoTo(backtrack);
- return;
- }
- break;
- case NON_LETTER_CHARACTER_MATCH:
- emit_function = &EmitAtomNonLetter;
- break;
- case SIMPLE_CHARACTER_MATCH:
- emit_function = &EmitSimpleCharacter;
- break;
- case CASE_CHARACTER_MATCH:
- emit_function = &EmitAtomLetter;
- break;
- default:
- break;
- }
- if (emit_function != nullptr) {
- bool bounds_check = *checked_up_to < cp_offset + j || read_backward();
- bool bound_checked =
- emit_function(isolate, compiler, quark, backtrack, cp_offset + j,
- bounds_check, preloaded);
- if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
- }
- }
- } else {
- DCHECK_EQ(TextElement::CHAR_CLASS, elm.text_type());
- if (pass == CHARACTER_CLASS_MATCH) {
- if (first_element_checked && i == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
- RegExpCharacterClass* cc = elm.char_class();
- bool bounds_check = *checked_up_to < cp_offset || read_backward();
- EmitCharClass(assembler, cc, one_byte, backtrack, cp_offset,
- bounds_check, preloaded, zone());
- UpdateBoundsCheck(cp_offset, checked_up_to);
- }
- }
- }
-}
-
-
-int TextNode::Length() {
- TextElement elm = elements()->last();
- DCHECK_LE(0, elm.cp_offset());
- return elm.cp_offset() + elm.length();
-}
-
-bool TextNode::SkipPass(TextEmitPassType pass, bool ignore_case) {
- if (ignore_case) {
- return pass == SIMPLE_CHARACTER_MATCH;
- } else {
- return pass == NON_LETTER_CHARACTER_MATCH || pass == CASE_CHARACTER_MATCH;
- }
-}
-
-TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
- ZoneList<CharacterRange>* ranges,
- bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags) {
- DCHECK_NOT_NULL(ranges);
- ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(1, zone);
- elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(zone, ranges, flags)),
- zone);
- return new (zone) TextNode(elms, read_backward, on_success);
-}
-
-TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
- CharacterRange trail,
- bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags) {
- ZoneList<CharacterRange>* lead_ranges = CharacterRange::List(zone, lead);
- ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
- ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(2, zone);
- elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(zone, lead_ranges, flags)),
- zone);
- elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(zone, trail_ranges, flags)),
- zone);
- return new (zone) TextNode(elms, read_backward, on_success);
-}
-
-
-// This generates the code to match a text node. A text node can contain
-// straight character sequences (possibly to be matched in a case-independent
-// way) and character classes. For efficiency we do not do this in a single
-// pass from left to right. Instead we pass over the text node several times,
-// emitting code for some character positions every time. See the comment on
-// TextEmitPass for details.
-void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- DCHECK(limit_result == CONTINUE);
-
- if (trace->cp_offset() + Length() > RegExpMacroAssembler::kMaxCPOffset) {
- compiler->SetRegExpTooBig();
- return;
- }
-
- if (compiler->one_byte()) {
- int dummy = 0;
- TextEmitPass(compiler, NON_LATIN1_MATCH, false, trace, false, &dummy);
- }
-
- bool first_elt_done = false;
- int bound_checked_to = trace->cp_offset() - 1;
- bound_checked_to += trace->bound_checked_up_to();
-
- // If a character is preloaded into the current character register then
- // check that now.
- if (trace->characters_preloaded() == 1) {
- for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
- TextEmitPass(compiler, static_cast<TextEmitPassType>(pass), true, trace,
- false, &bound_checked_to);
- }
- first_elt_done = true;
- }
-
- for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
- TextEmitPass(compiler, static_cast<TextEmitPassType>(pass), false, trace,
- first_elt_done, &bound_checked_to);
- }
-
- Trace successor_trace(*trace);
- // If we advance backward, we may end up at the start.
- successor_trace.AdvanceCurrentPositionInTrace(
- read_backward() ? -Length() : Length(), compiler);
- successor_trace.set_at_start(read_backward() ? Trace::UNKNOWN
- : Trace::FALSE_VALUE);
- RecursionCheck rc(compiler);
- on_success()->Emit(compiler, &successor_trace);
-}
-
-
-void Trace::InvalidateCurrentCharacter() {
- characters_preloaded_ = 0;
-}
-
-
-void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
- // We don't have an instruction for shifting the current character register
- // down or for using a shifted value for anything, so let's just forget that
- // we preloaded any characters into it.
- characters_preloaded_ = 0;
- // Adjust the offsets of the quick check performed information. This
- // information is used to find out what we already determined about the
- // characters by means of mask and compare.
- quick_check_performed_.Advance(by, compiler->one_byte());
- cp_offset_ += by;
- if (cp_offset_ > RegExpMacroAssembler::kMaxCPOffset) {
- compiler->SetRegExpTooBig();
- cp_offset_ = 0;
- }
- bound_checked_up_to_ = Max(0, bound_checked_up_to_ - by);
-}
-
-
-void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
- int element_count = elements()->length();
- for (int i = 0; i < element_count; i++) {
- TextElement elm = elements()->at(i);
- if (elm.text_type() == TextElement::CHAR_CLASS) {
- RegExpCharacterClass* cc = elm.char_class();
-#ifdef V8_INTL_SUPPORT
- bool case_equivalents_already_added =
- NeedsUnicodeCaseEquivalents(cc->flags());
-#else
- bool case_equivalents_already_added = false;
-#endif
- if (IgnoreCase(cc->flags()) && !case_equivalents_already_added) {
- // None of the standard character classes differs under case-independent
- // matching, and it slows us down if we don't know that.
- if (cc->is_standard(zone())) continue;
- ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- CharacterRange::AddCaseEquivalents(isolate, zone(), ranges,
- is_one_byte);
- }
- }
- }
-}
-
-
-int TextNode::GreedyLoopTextLength() { return Length(); }
-
-
-RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
- RegExpCompiler* compiler) {
- if (read_backward()) return nullptr;
- if (elements()->length() != 1) return nullptr;
- TextElement elm = elements()->at(0);
- if (elm.text_type() != TextElement::CHAR_CLASS) return nullptr;
- RegExpCharacterClass* node = elm.char_class();
- ZoneList<CharacterRange>* ranges = node->ranges(zone());
- CharacterRange::Canonicalize(ranges);
- if (node->is_negated()) {
- return ranges->length() == 0 ? on_success() : nullptr;
- }
- if (ranges->length() != 1) return nullptr;
- uint32_t max_char;
- if (compiler->one_byte()) {
- max_char = String::kMaxOneByteCharCode;
- } else {
- max_char = String::kMaxUtf16CodeUnit;
- }
- return ranges->at(0).IsEverything(max_char) ? on_success() : nullptr;
-}
-
-
-// Finds the fixed match length of a sequence of nodes that goes from
-// this alternative and back to this choice node. If there are variable
-// length nodes or other complications in the way then return a sentinel
-// value indicating that a greedy loop cannot be constructed.
-int ChoiceNode::GreedyLoopTextLengthForAlternative(
- GuardedAlternative* alternative) {
- int length = 0;
- RegExpNode* node = alternative->node();
- // Later we will generate code for all these text nodes using recursion
- // so we have to limit the max number.
- int recursion_depth = 0;
- while (node != this) {
- if (recursion_depth++ > RegExpCompiler::kMaxRecursion) {
- return kNodeIsTooComplexForGreedyLoops;
- }
- int node_length = node->GreedyLoopTextLength();
- if (node_length == kNodeIsTooComplexForGreedyLoops) {
- return kNodeIsTooComplexForGreedyLoops;
- }
- length += node_length;
- SeqRegExpNode* seq_node = static_cast<SeqRegExpNode*>(node);
- node = seq_node->on_success();
- }
- return read_backward() ? -length : length;
-}
-
-
-void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) {
- DCHECK_NULL(loop_node_);
- AddAlternative(alt);
- loop_node_ = alt.node();
-}
-
-
-void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) {
- DCHECK_NULL(continue_node_);
- AddAlternative(alt);
- continue_node_ = alt.node();
-}
-
-
-void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- if (trace->stop_node() == this) {
- // Back edge of greedy optimized loop node graph.
- int text_length =
- GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
- DCHECK_NE(kNodeIsTooComplexForGreedyLoops, text_length);
- // Update the counter-based backtracking info on the stack. This is an
- // optimization for greedy loops (see below).
- DCHECK(trace->cp_offset() == text_length);
- macro_assembler->AdvanceCurrentPosition(text_length);
- macro_assembler->GoTo(trace->loop_label());
- return;
- }
- DCHECK_NULL(trace->stop_node());
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- ChoiceNode::Emit(compiler, trace);
-}
-
-
-int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler,
- int eats_at_least) {
- int preload_characters = Min(4, eats_at_least);
- DCHECK_LE(preload_characters, 4);
- if (compiler->macro_assembler()->CanReadUnaligned()) {
- bool one_byte = compiler->one_byte();
- if (one_byte) {
- // We can't preload 3 characters because there is no machine instruction
- // to do that. We can't just load 4 because we could be reading
- // beyond the end of the string, which could cause a memory fault.
- if (preload_characters == 3) preload_characters = 2;
- } else {
- if (preload_characters > 2) preload_characters = 2;
- }
- } else {
- if (preload_characters > 1) preload_characters = 1;
- }
- return preload_characters;
-}
-
-
-// This class is used when generating the alternatives in a choice node. It
-// records how code is being generated for the alternative.
-class AlternativeGeneration: public Malloced {
- public:
- AlternativeGeneration()
- : possible_success(),
- expects_preload(false),
- after(),
- quick_check_details() { }
- Label possible_success;
- bool expects_preload;
- Label after;
- QuickCheckDetails quick_check_details;
-};
-
-
-// Creates a list of AlternativeGenerations. If the list has a reasonable
-// size then it is on the stack, otherwise the excess is on the heap.
-class AlternativeGenerationList {
- public:
- AlternativeGenerationList(int count, Zone* zone)
- : alt_gens_(count, zone) {
- for (int i = 0; i < count && i < kAFew; i++) {
- alt_gens_.Add(a_few_alt_gens_ + i, zone);
- }
- for (int i = kAFew; i < count; i++) {
- alt_gens_.Add(new AlternativeGeneration(), zone);
- }
- }
- ~AlternativeGenerationList() {
- for (int i = kAFew; i < alt_gens_.length(); i++) {
- delete alt_gens_[i];
- alt_gens_[i] = nullptr;
- }
- }
-
- AlternativeGeneration* at(int i) {
- return alt_gens_[i];
- }
-
- private:
- static const int kAFew = 10;
- ZoneList<AlternativeGeneration*> alt_gens_;
- AlternativeGeneration a_few_alt_gens_[kAFew];
-};
-
-
-static const uc32 kRangeEndMarker = 0x110000;
-
-// Each range in the tables below has an inclusive 'from' and an exclusive 'to'.
-// kSpaceRanges covers \s as defined in ECMA-262 5.1, 15.10.2.12,
-// which includes WhiteSpace (7.2) and LineTerminator (7.3) values.
-static const int kSpaceRanges[] = {
- '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0, 0x00A1, 0x1680,
- 0x1681, 0x2000, 0x200B, 0x2028, 0x202A, 0x202F, 0x2030,
- 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, kRangeEndMarker};
-static const int kSpaceRangeCount = arraysize(kSpaceRanges);
-
-static const int kWordRanges[] = {
- '0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, kRangeEndMarker};
-static const int kWordRangeCount = arraysize(kWordRanges);
-static const int kDigitRanges[] = {'0', '9' + 1, kRangeEndMarker};
-static const int kDigitRangeCount = arraysize(kDigitRanges);
-static const int kSurrogateRanges[] = {
- kLeadSurrogateStart, kLeadSurrogateStart + 1, kRangeEndMarker};
-static const int kSurrogateRangeCount = arraysize(kSurrogateRanges);
-static const int kLineTerminatorRanges[] = {
- 0x000A, 0x000B, 0x000D, 0x000E, 0x2028, 0x202A, kRangeEndMarker};
-static const int kLineTerminatorRangeCount = arraysize(kLineTerminatorRanges);
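
// [Editor's note] Illustrative sketch, not part of the diff above: the tables
// store pairs of boundaries, an inclusive 'from' followed by an exclusive
// 'to', terminated by kRangeEndMarker. A self-contained membership test over
// such a table could look like this (everything in the hypothetical `example`
// namespace is an assumption of this sketch, not code from the file):
#include <cstdint>

namespace example {

constexpr int kRangeEndMarker = 0x110000;
constexpr int kDigitRanges[] = {'0', '9' + 1, kRangeEndMarker};  // Mirrors \d.

// Returns true if code point c lies inside one of the [from, to) pairs.
bool InRangeTable(const int* table, uint32_t c) {
  for (int i = 0; table[i] != kRangeEndMarker; i += 2) {
    if (c >= static_cast<uint32_t>(table[i]) &&
        c < static_cast<uint32_t>(table[i + 1])) {
      return true;
    }
  }
  return false;
}

}  // namespace example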
-
-void BoyerMoorePositionInfo::Set(int character) {
- SetInterval(Interval(character, character));
-}
-
-
-void BoyerMoorePositionInfo::SetInterval(const Interval& interval) {
- s_ = AddRange(s_, kSpaceRanges, kSpaceRangeCount, interval);
- w_ = AddRange(w_, kWordRanges, kWordRangeCount, interval);
- d_ = AddRange(d_, kDigitRanges, kDigitRangeCount, interval);
- surrogate_ =
- AddRange(surrogate_, kSurrogateRanges, kSurrogateRangeCount, interval);
- if (interval.to() - interval.from() >= kMapSize - 1) {
- if (map_count_ != kMapSize) {
- map_count_ = kMapSize;
- for (int i = 0; i < kMapSize; i++) map_->at(i) = true;
- }
- return;
- }
- for (int i = interval.from(); i <= interval.to(); i++) {
- int mod_character = (i & kMask);
- if (!map_->at(mod_character)) {
- map_count_++;
- map_->at(mod_character) = true;
- }
- if (map_count_ == kMapSize) return;
- }
-}
-
-
-void BoyerMoorePositionInfo::SetAll() {
- s_ = w_ = d_ = kLatticeUnknown;
- if (map_count_ != kMapSize) {
- map_count_ = kMapSize;
- for (int i = 0; i < kMapSize; i++) map_->at(i) = true;
- }
-}
-
-
-BoyerMooreLookahead::BoyerMooreLookahead(
- int length, RegExpCompiler* compiler, Zone* zone)
- : length_(length),
- compiler_(compiler) {
- if (compiler->one_byte()) {
- max_char_ = String::kMaxOneByteCharCode;
- } else {
- max_char_ = String::kMaxUtf16CodeUnit;
- }
- bitmaps_ = new(zone) ZoneList<BoyerMoorePositionInfo*>(length, zone);
- for (int i = 0; i < length; i++) {
- bitmaps_->Add(new(zone) BoyerMoorePositionInfo(zone), zone);
- }
-}
-
-
-// Find the longest range of lookahead that has the fewest different characters
-// that can occur at a given position. Since we are optimizing two different
-// parameters at once, this is a tradeoff.
-bool BoyerMooreLookahead::FindWorthwhileInterval(int* from, int* to) {
- int biggest_points = 0;
- // If more than 32 characters out of 128 can occur it is unlikely that we can
- // be lucky enough to step forwards much of the time.
- const int kMaxMax = 32;
- for (int max_number_of_chars = 4;
- max_number_of_chars < kMaxMax;
- max_number_of_chars *= 2) {
- biggest_points =
- FindBestInterval(max_number_of_chars, biggest_points, from, to);
- }
- if (biggest_points == 0) return false;
- return true;
-}
-
-
-// Find the highest-points range between 0 and length_ where the character
-// information is not too vague. 'Too vague' means that there are more than
-// max_number_of_chars that can occur at this position. Calculates the number
-// of points as the product of width-of-the-range and
-// probability-of-finding-one-of-the-characters, where the probability is
-// calculated using the frequency distribution of the sample subject string.
-int BoyerMooreLookahead::FindBestInterval(
- int max_number_of_chars, int old_biggest_points, int* from, int* to) {
- int biggest_points = old_biggest_points;
- static const int kSize = RegExpMacroAssembler::kTableSize;
- for (int i = 0; i < length_; ) {
- while (i < length_ && Count(i) > max_number_of_chars) i++;
- if (i == length_) break;
- int remembered_from = i;
- bool union_map[kSize];
- for (int j = 0; j < kSize; j++) union_map[j] = false;
- while (i < length_ && Count(i) <= max_number_of_chars) {
- BoyerMoorePositionInfo* map = bitmaps_->at(i);
- for (int j = 0; j < kSize; j++) union_map[j] |= map->at(j);
- i++;
- }
- int frequency = 0;
- for (int j = 0; j < kSize; j++) {
- if (union_map[j]) {
- // Add 1 to the frequency to give a small per-character boost for
- // the cases where our sampling is not good enough and many
- // characters have a frequency of zero. This means the frequency
- // can theoretically be up to 2*kSize though we treat it mostly as
- // a fraction of kSize.
- frequency += compiler_->frequency_collator()->Frequency(j) + 1;
- }
- }
- // We use the probability of skipping times the distance we are skipping to
- // judge the effectiveness of this. Actually we have a cut-off: by
- // dividing by 2 we switch off the skipping if the probability of skipping
- // is less than 50%. This is because the multibyte mask-and-compare
- // skipping in quickcheck is more likely to do well in this case.
- bool in_quickcheck_range =
- ((i - remembered_from < 4) ||
- (compiler_->one_byte() ? remembered_from <= 4 : remembered_from <= 2));
- // Called 'probability' but it is only a rough estimate and can actually
- // be outside the 0-kSize range.
- int probability = (in_quickcheck_range ? kSize / 2 : kSize) - frequency;
- int points = (i - remembered_from) * probability;
- if (points > biggest_points) {
- *from = remembered_from;
- *to = i - 1;
- biggest_points = points;
- }
- }
- return biggest_points;
-}
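
// [Editor's note] Illustrative sketch, not part of the diff: the scoring used
// by FindBestInterval above, written out for a single candidate interval.
// 'frequency' is the summed sampled frequency (including the per-character
// boost) of every character that can occur somewhere in the interval; the
// quick-check cut-off halves the budget so skipping is only chosen when it is
// expected to pay off more than half the time. kTableSize is assumed to be
// 128, matching RegExpMacroAssembler::kTableSize used above.
namespace example {

constexpr int kTableSize = 128;  // Assumption; see note above.

int IntervalPoints(int from, int to, int frequency, bool in_quickcheck_range) {
  int width = to - from + 1;  // The distance we could skip by.
  int probability =
      (in_quickcheck_range ? kTableSize / 2 : kTableSize) - frequency;
  return width * probability;  // Same formula as 'points' in the code above.
}

}  // namespace example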
-
-
-// Take all the characters that will not prevent a successful match if they
-// occur in the subject string in the range between min_lookahead and
-// max_lookahead (inclusive) measured from the current position. If the
-// character at max_lookahead offset is not one of these characters, then we
-// can safely skip forwards by the number of characters in the range.
-int BoyerMooreLookahead::GetSkipTable(int min_lookahead,
- int max_lookahead,
- Handle<ByteArray> boolean_skip_table) {
- const int kSize = RegExpMacroAssembler::kTableSize;
-
- const int kSkipArrayEntry = 0;
- const int kDontSkipArrayEntry = 1;
-
- for (int i = 0; i < kSize; i++) {
- boolean_skip_table->set(i, kSkipArrayEntry);
- }
- int skip = max_lookahead + 1 - min_lookahead;
-
- for (int i = max_lookahead; i >= min_lookahead; i--) {
- BoyerMoorePositionInfo* map = bitmaps_->at(i);
- for (int j = 0; j < kSize; j++) {
- if (map->at(j)) {
- boolean_skip_table->set(j, kDontSkipArrayEntry);
- }
- }
- }
-
- return skip;
-}
-
-
-// See comment above on the implementation of GetSkipTable.
-void BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
- const int kSize = RegExpMacroAssembler::kTableSize;
-
- int min_lookahead = 0;
- int max_lookahead = 0;
-
- if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return;
-
- bool found_single_character = false;
- int single_character = 0;
- for (int i = max_lookahead; i >= min_lookahead; i--) {
- BoyerMoorePositionInfo* map = bitmaps_->at(i);
- if (map->map_count() > 1 ||
- (found_single_character && map->map_count() != 0)) {
- found_single_character = false;
- break;
- }
- for (int j = 0; j < kSize; j++) {
- if (map->at(j)) {
- found_single_character = true;
- single_character = j;
- break;
- }
- }
- }
-
- int lookahead_width = max_lookahead + 1 - min_lookahead;
-
- if (found_single_character && lookahead_width == 1 && max_lookahead < 3) {
- // The mask-compare can probably handle this better.
- return;
- }
-
- if (found_single_character) {
- Label cont, again;
- masm->Bind(&again);
- masm->LoadCurrentCharacter(max_lookahead, &cont, true);
- if (max_char_ > kSize) {
- masm->CheckCharacterAfterAnd(single_character,
- RegExpMacroAssembler::kTableMask,
- &cont);
- } else {
- masm->CheckCharacter(single_character, &cont);
- }
- masm->AdvanceCurrentPosition(lookahead_width);
- masm->GoTo(&again);
- masm->Bind(&cont);
- return;
- }
-
- Factory* factory = masm->isolate()->factory();
- Handle<ByteArray> boolean_skip_table =
- factory->NewByteArray(kSize, AllocationType::kOld);
- int skip_distance = GetSkipTable(
- min_lookahead, max_lookahead, boolean_skip_table);
- DCHECK_NE(0, skip_distance);
-
- Label cont, again;
- masm->Bind(&again);
- masm->LoadCurrentCharacter(max_lookahead, &cont, true);
- masm->CheckBitInTable(boolean_skip_table, &cont);
- masm->AdvanceCurrentPosition(skip_distance);
- masm->GoTo(&again);
- masm->Bind(&cont);
-}
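
// [Editor's note] Illustrative sketch, not part of the diff: the loop emitted
// by EmitSkipInstructions above behaves roughly like this hypothetical
// interpreter of the boolean skip table. The table marks every character
// (taken modulo the table size) that may occur anywhere in the lookahead
// window; only when the probed character is absent from the window can the
// scan safely jump ahead by skip_distance.
#include <array>
#include <cstddef>
#include <string>

namespace example {

constexpr int kTableSize = 128;  // Assumption, as in the sketch above.

size_t SkipScan(const std::string& subject, size_t pos, int max_lookahead,
                int skip_distance, const std::array<bool, kTableSize>& table) {
  while (pos + max_lookahead < subject.size()) {
    unsigned char probe =
        static_cast<unsigned char>(subject[pos + max_lookahead]);
    if (table[probe % kTableSize]) break;  // Character may occur; stop here.
    pos += skip_distance;                  // Cannot match; skip forward.
  }
  return pos;
}

}  // namespace example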
-
-
-/* Code generation for choice nodes.
- *
- * We generate quick checks that do a mask and compare to eliminate a
- * choice. If the quick check succeeds then it jumps to the continuation to
- * do slow checks and check subsequent nodes. If it fails (the common case)
- * it falls through to the next choice.
- *
- * Here is the desired flow graph. Nodes directly below each other imply
- * fallthrough. Alternatives 1 and 2 have quick checks. Alternative
- * 3 doesn't have a quick check so we have to call the slow check.
- * Nodes are marked Qn for quick checks and Sn for slow checks. The entire
- * regexp continuation is generated directly after the Sn node, up to the
- * next GoTo if we decide to reuse some already generated code. Some
- * nodes expect preload_characters to be preloaded into the current
- * character register. R nodes do this preloading. Vertices are marked
- * F for failures and S for success (possible success in the case of quick
- * nodes). L, V, < and > are used as arrow heads.
- *
- * ----------> R
- * |
- * V
- * Q1 -----> S1
- * | S /
- * F| /
- * | F/
- * | /
- * | R
- * | /
- * V L
- * Q2 -----> S2
- * | S /
- * F| /
- * | F/
- * | /
- * | R
- * | /
- * V L
- * S3
- * |
- * F|
- * |
- * R
- * |
- * backtrack V
- * <----------Q4
- * \ F |
- * \ |S
- * \ F V
- * \-----S4
- *
- * For greedy loops we push the current position, then generate the code that
- * eats the input specially in EmitGreedyLoop. The other choice (the
- * continuation) is generated by the normal code in EmitChoices, and steps back
- * in the input to the starting position when it fails to match. The loop code
- * looks like this (U is the unwind code that steps back in the greedy loop).
- *
- * _____
- * / \
- * V |
- * ----------> S1 |
- * /| |
- * / |S |
- * F/ \_____/
- * /
- * |<-----
- * | \
- * V |S
- * Q2 ---> U----->backtrack
- * | F /
- * S| /
- * V F /
- * S2--/
- */
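
// [Editor's note] Illustrative sketch, not part of the diff: the "mask and
// compare" quick check described in the comment above, reduced to its core.
// A chunk of preloaded characters is packed into one word; bits that must
// take a particular value for this alternative are covered by the mask. A
// mismatch lets generated code fall through to the next choice cheaply, while
// a match only means the alternative is still possible and the slow check
// runs. The struct and function names are hypothetical.
#include <cstdint>

namespace example {

struct QuickCheck {
  uint32_t mask;   // Bits that are significant for this alternative.
  uint32_t value;  // Required values of those bits.
};

// Returns false if the preloaded characters definitely cannot start a match.
bool MightMatch(uint32_t preloaded_chars, const QuickCheck& qc) {
  return (preloaded_chars & qc.mask) == qc.value;
}

}  // namespace example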
-
-GreedyLoopState::GreedyLoopState(bool not_at_start) {
- counter_backtrack_trace_.set_backtrack(&label_);
- if (not_at_start) counter_backtrack_trace_.set_at_start(Trace::FALSE_VALUE);
-}
-
-
-void ChoiceNode::AssertGuardsMentionRegisters(Trace* trace) {
-#ifdef DEBUG
- int choice_count = alternatives_->length();
- for (int i = 0; i < choice_count - 1; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == nullptr) ? 0 : guards->length();
- for (int j = 0; j < guard_count; j++) {
- DCHECK(!trace->mentions_reg(guards->at(j)->reg()));
- }
- }
-#endif
-}
-
-
-void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler,
- Trace* current_trace,
- PreloadState* state) {
- if (state->eats_at_least_ == PreloadState::kEatsAtLeastNotYetInitialized) {
- // Save some time by looking at most one machine word ahead.
- state->eats_at_least_ =
- EatsAtLeast(compiler->one_byte() ? 4 : 2, kRecursionBudget,
- current_trace->at_start() == Trace::FALSE_VALUE);
- }
- state->preload_characters_ =
- CalculatePreloadCharacters(compiler, state->eats_at_least_);
-
- state->preload_is_current_ =
- (current_trace->characters_preloaded() == state->preload_characters_);
- state->preload_has_checked_bounds_ = state->preload_is_current_;
-}
-
-
-void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- int choice_count = alternatives_->length();
-
- if (choice_count == 1 && alternatives_->at(0).guards() == nullptr) {
- alternatives_->at(0).node()->Emit(compiler, trace);
- return;
- }
-
- AssertGuardsMentionRegisters(trace);
-
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- DCHECK(limit_result == CONTINUE);
-
- // For loop nodes we already flushed (see LoopChoiceNode::Emit), but for
- // other choice nodes we only flush if we are out of code size budget.
- if (trace->flush_budget() == 0 && trace->actions() != nullptr) {
- trace->Flush(compiler, this);
- return;
- }
-
- RecursionCheck rc(compiler);
-
- PreloadState preload;
- preload.init();
- GreedyLoopState greedy_loop_state(not_at_start());
-
- int text_length = GreedyLoopTextLengthForAlternative(&alternatives_->at(0));
- AlternativeGenerationList alt_gens(choice_count, zone());
-
- if (choice_count > 1 && text_length != kNodeIsTooComplexForGreedyLoops) {
- trace = EmitGreedyLoop(compiler,
- trace,
- &alt_gens,
- &preload,
- &greedy_loop_state,
- text_length);
- } else {
- // TODO(erikcorry): Delete this. We don't need this label, but it makes us
- // match the traces produced pre-cleanup.
- Label second_choice;
- compiler->macro_assembler()->Bind(&second_choice);
-
- preload.eats_at_least_ = EmitOptimizedUnanchoredSearch(compiler, trace);
-
- EmitChoices(compiler,
- &alt_gens,
- 0,
- trace,
- &preload);
- }
-
- // At this point we need to generate slow checks for the alternatives where
- // the quick check was inlined. We can recognize these because the associated
- // label was bound.
- int new_flush_budget = trace->flush_budget() / choice_count;
- for (int i = 0; i < choice_count; i++) {
- AlternativeGeneration* alt_gen = alt_gens.at(i);
- Trace new_trace(*trace);
- // If there are actions to be flushed we have to limit how many times
- // they are flushed. Take the budget of the parent trace and distribute
- // it fairly amongst the children.
- if (new_trace.actions() != nullptr) {
- new_trace.set_flush_budget(new_flush_budget);
- }
- bool next_expects_preload =
- i == choice_count - 1 ? false : alt_gens.at(i + 1)->expects_preload;
- EmitOutOfLineContinuation(compiler,
- &new_trace,
- alternatives_->at(i),
- alt_gen,
- preload.preload_characters_,
- next_expects_preload);
- }
-}
-
-
-Trace* ChoiceNode::EmitGreedyLoop(RegExpCompiler* compiler,
- Trace* trace,
- AlternativeGenerationList* alt_gens,
- PreloadState* preload,
- GreedyLoopState* greedy_loop_state,
- int text_length) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- // Here we have special handling for greedy loops containing only text nodes
- // and other simple nodes. These are handled by pushing the current
- // position on the stack and then incrementing the current position each
- // time around the loop. On backtrack we decrement the current position
- // and check it against the pushed value. This avoids pushing backtrack
- // information for each iteration of the loop, which could take up a lot of
- // space.
- DCHECK(trace->stop_node() == nullptr);
- macro_assembler->PushCurrentPosition();
- Label greedy_match_failed;
- Trace greedy_match_trace;
- if (not_at_start()) greedy_match_trace.set_at_start(Trace::FALSE_VALUE);
- greedy_match_trace.set_backtrack(&greedy_match_failed);
- Label loop_label;
- macro_assembler->Bind(&loop_label);
- greedy_match_trace.set_stop_node(this);
- greedy_match_trace.set_loop_label(&loop_label);
- alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
- macro_assembler->Bind(&greedy_match_failed);
-
- Label second_choice; // For use in greedy matches.
- macro_assembler->Bind(&second_choice);
-
- Trace* new_trace = greedy_loop_state->counter_backtrack_trace();
-
- EmitChoices(compiler,
- alt_gens,
- 1,
- new_trace,
- preload);
-
- macro_assembler->Bind(greedy_loop_state->label());
- // If we have unwound to the bottom then backtrack.
- macro_assembler->CheckGreedyLoop(trace->backtrack());
- // Otherwise try the second priority at an earlier position.
- macro_assembler->AdvanceCurrentPosition(-text_length);
- macro_assembler->GoTo(&second_choice);
- return new_trace;
-}
-
-int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
- Trace* trace) {
- int eats_at_least = PreloadState::kEatsAtLeastNotYetInitialized;
- if (alternatives_->length() != 2) return eats_at_least;
-
- GuardedAlternative alt1 = alternatives_->at(1);
- if (alt1.guards() != nullptr && alt1.guards()->length() != 0) {
- return eats_at_least;
- }
- RegExpNode* eats_anything_node = alt1.node();
- if (eats_anything_node->GetSuccessorOfOmnivorousTextNode(compiler) != this) {
- return eats_at_least;
- }
-
- // Really we should be creating a new trace when we execute this function,
- // but there is no need, because the code it generates cannot backtrack, and
- // we always arrive here with a trivial trace (since it's the entry to a
- // loop). That also implies that there are no preloaded characters, which is
- // good, because it means we won't be violating any assumptions by
- // overwriting those characters with new load instructions.
- DCHECK(trace->is_trivial());
-
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- Isolate* isolate = macro_assembler->isolate();
- // At this point we know that we are at a non-greedy loop that will eat
- // any character one at a time. Any non-anchored regexp has such a
- // loop prepended to it in order to find where it starts. We look for
- // a pattern of the form ...abc... where we can look 6 characters ahead
- // and step forwards 3 if the character is not one of abc. Abc need
- // not be atoms, they can be any reasonably limited character class or
- // small alternation.
- BoyerMooreLookahead* bm = bm_info(false);
- if (bm == nullptr) {
- eats_at_least = Min(kMaxLookaheadForBoyerMoore,
- EatsAtLeast(kMaxLookaheadForBoyerMoore,
- kRecursionBudget,
- false));
- if (eats_at_least >= 1) {
- bm = new(zone()) BoyerMooreLookahead(eats_at_least,
- compiler,
- zone());
- GuardedAlternative alt0 = alternatives_->at(0);
- alt0.node()->FillInBMInfo(isolate, 0, kRecursionBudget, bm, false);
- }
- }
- if (bm != nullptr) {
- bm->EmitSkipInstructions(macro_assembler);
- }
- return eats_at_least;
-}
-
-
-void ChoiceNode::EmitChoices(RegExpCompiler* compiler,
- AlternativeGenerationList* alt_gens,
- int first_choice,
- Trace* trace,
- PreloadState* preload) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- SetUpPreLoad(compiler, trace, preload);
-
- // For now we just call all choices one after the other. The idea ultimately
- // is to use the Dispatch table to try only the relevant ones.
- int choice_count = alternatives_->length();
-
- int new_flush_budget = trace->flush_budget() / choice_count;
-
- for (int i = first_choice; i < choice_count; i++) {
- bool is_last = i == choice_count - 1;
- bool fall_through_on_failure = !is_last;
- GuardedAlternative alternative = alternatives_->at(i);
- AlternativeGeneration* alt_gen = alt_gens->at(i);
- alt_gen->quick_check_details.set_characters(preload->preload_characters_);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == nullptr) ? 0 : guards->length();
- Trace new_trace(*trace);
- new_trace.set_characters_preloaded(preload->preload_is_current_ ?
- preload->preload_characters_ :
- 0);
- if (preload->preload_has_checked_bounds_) {
- new_trace.set_bound_checked_up_to(preload->preload_characters_);
- }
- new_trace.quick_check_performed()->Clear();
- if (not_at_start_) new_trace.set_at_start(Trace::FALSE_VALUE);
- if (!is_last) {
- new_trace.set_backtrack(&alt_gen->after);
- }
- alt_gen->expects_preload = preload->preload_is_current_;
- bool generate_full_check_inline = false;
- if (compiler->optimize() &&
- try_to_emit_quick_check_for_alternative(i == 0) &&
- alternative.node()->EmitQuickCheck(
- compiler, trace, &new_trace, preload->preload_has_checked_bounds_,
- &alt_gen->possible_success, &alt_gen->quick_check_details,
- fall_through_on_failure)) {
- // Quick check was generated for this choice.
- preload->preload_is_current_ = true;
- preload->preload_has_checked_bounds_ = true;
- // If we generated the quick check to fall through on possible success,
- // we now need to generate the full check inline.
- if (!fall_through_on_failure) {
- macro_assembler->Bind(&alt_gen->possible_success);
- new_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- new_trace.set_characters_preloaded(preload->preload_characters_);
- new_trace.set_bound_checked_up_to(preload->preload_characters_);
- generate_full_check_inline = true;
- }
- } else if (alt_gen->quick_check_details.cannot_match()) {
- if (!fall_through_on_failure) {
- macro_assembler->GoTo(trace->backtrack());
- }
- continue;
- } else {
- // No quick check was generated. Put the full code here.
- // If this is not the first choice then there could be slow checks from
- // previous cases that go here when they fail. There's no reason to
- // insist that they preload characters since the slow check we are about
- // to generate probably can't use it.
- if (i != first_choice) {
- alt_gen->expects_preload = false;
- new_trace.InvalidateCurrentCharacter();
- }
- generate_full_check_inline = true;
- }
- if (generate_full_check_inline) {
- if (new_trace.actions() != nullptr) {
- new_trace.set_flush_budget(new_flush_budget);
- }
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &new_trace);
- }
- alternative.node()->Emit(compiler, &new_trace);
- preload->preload_is_current_ = false;
- }
- macro_assembler->Bind(&alt_gen->after);
- }
-}
-
-
-void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
- Trace* trace,
- GuardedAlternative alternative,
- AlternativeGeneration* alt_gen,
- int preload_characters,
- bool next_expects_preload) {
- if (!alt_gen->possible_success.is_linked()) return;
-
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- macro_assembler->Bind(&alt_gen->possible_success);
- Trace out_of_line_trace(*trace);
- out_of_line_trace.set_characters_preloaded(preload_characters);
- out_of_line_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE_VALUE);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == nullptr) ? 0 : guards->length();
- if (next_expects_preload) {
- Label reload_current_char;
- out_of_line_trace.set_backtrack(&reload_current_char);
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
- }
- alternative.node()->Emit(compiler, &out_of_line_trace);
- macro_assembler->Bind(&reload_current_char);
- // Reload the current character, since the next quick check expects that.
- // We don't need to check bounds here because we only get into this
- // code through a quick check which already did the checked load.
- macro_assembler->LoadCurrentCharacter(trace->cp_offset(), nullptr, false,
- preload_characters);
- macro_assembler->GoTo(&(alt_gen->after));
- } else {
- out_of_line_trace.set_backtrack(&(alt_gen->after));
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
- }
- alternative.node()->Emit(compiler, &out_of_line_trace);
- }
-}
-
-
-void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- DCHECK(limit_result == CONTINUE);
-
- RecursionCheck rc(compiler);
-
- switch (action_type_) {
- case STORE_POSITION: {
- Trace::DeferredCapture
- new_capture(data_.u_position_register.reg,
- data_.u_position_register.is_capture,
- trace);
- Trace new_trace = *trace;
- new_trace.add_action(&new_capture);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case INCREMENT_REGISTER: {
- Trace::DeferredIncrementRegister
- new_increment(data_.u_increment_register.reg);
- Trace new_trace = *trace;
- new_trace.add_action(&new_increment);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case SET_REGISTER: {
- Trace::DeferredSetRegister
- new_set(data_.u_store_register.reg, data_.u_store_register.value);
- Trace new_trace = *trace;
- new_trace.add_action(&new_set);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case CLEAR_CAPTURES: {
- Trace::DeferredClearCaptures
- new_capture(Interval(data_.u_clear_captures.range_from,
- data_.u_clear_captures.range_to));
- Trace new_trace = *trace;
- new_trace.add_action(&new_capture);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case BEGIN_SUBMATCH:
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- } else {
- assembler->WriteCurrentPositionToRegister(
- data_.u_submatch.current_position_register, 0);
- assembler->WriteStackPointerToRegister(
- data_.u_submatch.stack_pointer_register);
- on_success()->Emit(compiler, trace);
- }
- break;
- case EMPTY_MATCH_CHECK: {
- int start_pos_reg = data_.u_empty_match_check.start_register;
- int stored_pos = 0;
- int rep_reg = data_.u_empty_match_check.repetition_register;
- bool has_minimum = (rep_reg != RegExpCompiler::kNoRegister);
- bool know_dist = trace->GetStoredPosition(start_pos_reg, &stored_pos);
- if (know_dist && !has_minimum && stored_pos == trace->cp_offset()) {
- // If we know we haven't advanced and there is no minimum we
- // can just backtrack immediately.
- assembler->GoTo(trace->backtrack());
- } else if (know_dist && stored_pos < trace->cp_offset()) {
- // If we know we've advanced we can generate the continuation
- // immediately.
- on_success()->Emit(compiler, trace);
- } else if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- } else {
- Label skip_empty_check;
- // If we have a minimum number of repetitions we check the current
- // number first and skip the empty check if it's not enough.
- if (has_minimum) {
- int limit = data_.u_empty_match_check.repetition_limit;
- assembler->IfRegisterLT(rep_reg, limit, &skip_empty_check);
- }
- // If the match is empty we bail out, otherwise we fall through
- // to the on-success continuation.
- assembler->IfRegisterEqPos(data_.u_empty_match_check.start_register,
- trace->backtrack());
- assembler->Bind(&skip_empty_check);
- on_success()->Emit(compiler, trace);
- }
- break;
- }
- case POSITIVE_SUBMATCH_SUCCESS: {
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- assembler->ReadCurrentPositionFromRegister(
- data_.u_submatch.current_position_register);
- assembler->ReadStackPointerFromRegister(
- data_.u_submatch.stack_pointer_register);
- int clear_register_count = data_.u_submatch.clear_register_count;
- if (clear_register_count == 0) {
- on_success()->Emit(compiler, trace);
- return;
- }
- int clear_registers_from = data_.u_submatch.clear_register_from;
- Label clear_registers_backtrack;
- Trace new_trace = *trace;
- new_trace.set_backtrack(&clear_registers_backtrack);
- on_success()->Emit(compiler, &new_trace);
-
- assembler->Bind(&clear_registers_backtrack);
- int clear_registers_to = clear_registers_from + clear_register_count - 1;
- assembler->ClearRegisters(clear_registers_from, clear_registers_to);
-
- DCHECK(trace->backtrack() == nullptr);
- assembler->Backtrack();
- return;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
-
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- DCHECK(limit_result == CONTINUE);
-
- RecursionCheck rc(compiler);
-
- DCHECK_EQ(start_reg_ + 1, end_reg_);
- if (IgnoreCase(flags_)) {
- assembler->CheckNotBackReferenceIgnoreCase(
- start_reg_, read_backward(), IsUnicode(flags_), trace->backtrack());
- } else {
- assembler->CheckNotBackReference(start_reg_, read_backward(),
- trace->backtrack());
- }
- // We are going to advance backward, so we may end up at the start.
- if (read_backward()) trace->set_at_start(Trace::UNKNOWN);
-
- // Check that the back reference does not end inside a surrogate pair.
- if (IsUnicode(flags_) && !compiler->one_byte()) {
- assembler->CheckNotInSurrogatePair(trace->cp_offset(), trace->backtrack());
- }
- on_success()->Emit(compiler, trace);
-}
-
-
-// -------------------------------------------------------------------
-// Dot/dotty output
-
-
-#ifdef DEBUG
-
-
-class DotPrinter: public NodeVisitor {
- public:
- DotPrinter(std::ostream& os, bool ignore_case) // NOLINT
- : os_(os),
- ignore_case_(ignore_case) {}
- void PrintNode(const char* label, RegExpNode* node);
- void Visit(RegExpNode* node);
- void PrintAttributes(RegExpNode* from);
- void PrintOnFailure(RegExpNode* from, RegExpNode* to);
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- private:
- std::ostream& os_;
- bool ignore_case_;
-};
-
-
-void DotPrinter::PrintNode(const char* label, RegExpNode* node) {
- os_ << "digraph G {\n graph [label=\"";
- for (int i = 0; label[i]; i++) {
- switch (label[i]) {
- case '\\':
- os_ << "\\\\";
- break;
- case '"':
- os_ << "\"";
- break;
- default:
- os_ << label[i];
- break;
- }
- }
- os_ << "\"];\n";
- Visit(node);
- os_ << "}" << std::endl;
-}
-
-
-void DotPrinter::Visit(RegExpNode* node) {
- if (node->info()->visited) return;
- node->info()->visited = true;
- node->Accept(this);
-}
-
-
-void DotPrinter::PrintOnFailure(RegExpNode* from, RegExpNode* on_failure) {
- os_ << " n" << from << " -> n" << on_failure << " [style=dotted];\n";
- Visit(on_failure);
-}
-
-
-class TableEntryBodyPrinter {
- public:
- TableEntryBodyPrinter(std::ostream& os, ChoiceNode* choice) // NOLINT
- : os_(os),
- choice_(choice) {}
- void Call(uc16 from, DispatchTable::Entry entry) {
- OutSet* out_set = entry.out_set();
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (out_set->Get(i)) {
- os_ << " n" << choice() << ":s" << from << "o" << i << " -> n"
- << choice()->alternatives()->at(i).node() << ";\n";
- }
- }
- }
- private:
- ChoiceNode* choice() { return choice_; }
- std::ostream& os_;
- ChoiceNode* choice_;
-};
-
-
-class TableEntryHeaderPrinter {
- public:
- explicit TableEntryHeaderPrinter(std::ostream& os) // NOLINT
- : first_(true),
- os_(os) {}
- void Call(uc16 from, DispatchTable::Entry entry) {
- if (first_) {
- first_ = false;
- } else {
- os_ << "|";
- }
- os_ << "{\\" << AsUC16(from) << "-\\" << AsUC16(entry.to()) << "|{";
- OutSet* out_set = entry.out_set();
- int priority = 0;
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (out_set->Get(i)) {
- if (priority > 0) os_ << "|";
- os_ << "<s" << from << "o" << i << "> " << priority;
- priority++;
- }
- }
- os_ << "}}";
- }
-
- private:
- bool first_;
- std::ostream& os_;
-};
-
-
-class AttributePrinter {
- public:
- explicit AttributePrinter(std::ostream& os) // NOLINT
- : os_(os),
- first_(true) {}
- void PrintSeparator() {
- if (first_) {
- first_ = false;
- } else {
- os_ << "|";
- }
- }
- void PrintBit(const char* name, bool value) {
- if (!value) return;
- PrintSeparator();
- os_ << "{" << name << "}";
- }
- void PrintPositive(const char* name, int value) {
- if (value < 0) return;
- PrintSeparator();
- os_ << "{" << name << "|" << value << "}";
- }
-
- private:
- std::ostream& os_;
- bool first_;
-};
-
-
-void DotPrinter::PrintAttributes(RegExpNode* that) {
- os_ << " a" << that << " [shape=Mrecord, color=grey, fontcolor=grey, "
- << "margin=0.1, fontsize=10, label=\"{";
- AttributePrinter printer(os_);
- NodeInfo* info = that->info();
- printer.PrintBit("NI", info->follows_newline_interest);
- printer.PrintBit("WI", info->follows_word_interest);
- printer.PrintBit("SI", info->follows_start_interest);
- Label* label = that->label();
- if (label->is_bound())
- printer.PrintPositive("@", label->pos());
- os_ << "}\"];\n"
- << " a" << that << " -> n" << that
- << " [style=dashed, color=grey, arrowhead=none];\n";
-}
-
-
-static const bool kPrintDispatchTable = false;
-void DotPrinter::VisitChoice(ChoiceNode* that) {
- if (kPrintDispatchTable) {
- os_ << " n" << that << " [shape=Mrecord, label=\"";
- TableEntryHeaderPrinter header_printer(os_);
- that->GetTable(ignore_case_)->ForEach(&header_printer);
- os_ << "\"]\n";
- PrintAttributes(that);
- TableEntryBodyPrinter body_printer(os_, that);
- that->GetTable(ignore_case_)->ForEach(&body_printer);
- } else {
- os_ << " n" << that << " [shape=Mrecord, label=\"?\"];\n";
- for (int i = 0; i < that->alternatives()->length(); i++) {
- GuardedAlternative alt = that->alternatives()->at(i);
- os_ << " n" << that << " -> n" << alt.node();
- }
- }
- for (int i = 0; i < that->alternatives()->length(); i++) {
- GuardedAlternative alt = that->alternatives()->at(i);
- alt.node()->Accept(this);
- }
-}
-
-
-void DotPrinter::VisitText(TextNode* that) {
- Zone* zone = that->zone();
- os_ << " n" << that << " [label=\"";
- for (int i = 0; i < that->elements()->length(); i++) {
- if (i > 0) os_ << " ";
- TextElement elm = that->elements()->at(i);
- switch (elm.text_type()) {
- case TextElement::ATOM: {
- Vector<const uc16> data = elm.atom()->data();
- for (int i = 0; i < data.length(); i++) {
- os_ << static_cast<char>(data[i]);
- }
- break;
- }
- case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* node = elm.char_class();
- os_ << "[";
- if (node->is_negated()) os_ << "^";
- for (int j = 0; j < node->ranges(zone)->length(); j++) {
- CharacterRange range = node->ranges(zone)->at(j);
- os_ << AsUC16(range.from()) << "-" << AsUC16(range.to());
- }
- os_ << "]";
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- os_ << "\", shape=box, peripheries=2];\n";
- PrintAttributes(that);
- os_ << " n" << that << " -> n" << that->on_success() << ";\n";
- Visit(that->on_success());
-}
-
-
-void DotPrinter::VisitBackReference(BackReferenceNode* that) {
- os_ << " n" << that << " [label=\"$" << that->start_register() << "..$"
- << that->end_register() << "\", shape=doubleoctagon];\n";
- PrintAttributes(that);
- os_ << " n" << that << " -> n" << that->on_success() << ";\n";
- Visit(that->on_success());
-}
-
-
-void DotPrinter::VisitEnd(EndNode* that) {
- os_ << " n" << that << " [style=bold, shape=point];\n";
- PrintAttributes(that);
-}
-
-
-void DotPrinter::VisitAssertion(AssertionNode* that) {
- os_ << " n" << that << " [";
- switch (that->assertion_type()) {
- case AssertionNode::AT_END:
- os_ << "label=\"$\", shape=septagon";
- break;
- case AssertionNode::AT_START:
- os_ << "label=\"^\", shape=septagon";
- break;
- case AssertionNode::AT_BOUNDARY:
- os_ << "label=\"\\b\", shape=septagon";
- break;
- case AssertionNode::AT_NON_BOUNDARY:
- os_ << "label=\"\\B\", shape=septagon";
- break;
- case AssertionNode::AFTER_NEWLINE:
- os_ << "label=\"(?<=\\n)\", shape=septagon";
- break;
- }
- os_ << "];\n";
- PrintAttributes(that);
- RegExpNode* successor = that->on_success();
- os_ << " n" << that << " -> n" << successor << ";\n";
- Visit(successor);
-}
-
-
-void DotPrinter::VisitAction(ActionNode* that) {
- os_ << " n" << that << " [";
- switch (that->action_type_) {
- case ActionNode::SET_REGISTER:
- os_ << "label=\"$" << that->data_.u_store_register.reg
- << ":=" << that->data_.u_store_register.value << "\", shape=octagon";
- break;
- case ActionNode::INCREMENT_REGISTER:
- os_ << "label=\"$" << that->data_.u_increment_register.reg
- << "++\", shape=octagon";
- break;
- case ActionNode::STORE_POSITION:
- os_ << "label=\"$" << that->data_.u_position_register.reg
- << ":=$pos\", shape=octagon";
- break;
- case ActionNode::BEGIN_SUBMATCH:
- os_ << "label=\"$" << that->data_.u_submatch.current_position_register
- << ":=$pos,begin\", shape=septagon";
- break;
- case ActionNode::POSITIVE_SUBMATCH_SUCCESS:
- os_ << "label=\"escape\", shape=septagon";
- break;
- case ActionNode::EMPTY_MATCH_CHECK:
- os_ << "label=\"$" << that->data_.u_empty_match_check.start_register
- << "=$pos?,$" << that->data_.u_empty_match_check.repetition_register
- << "<" << that->data_.u_empty_match_check.repetition_limit
- << "?\", shape=septagon";
- break;
- case ActionNode::CLEAR_CAPTURES: {
- os_ << "label=\"clear $" << that->data_.u_clear_captures.range_from
- << " to $" << that->data_.u_clear_captures.range_to
- << "\", shape=septagon";
- break;
- }
- }
- os_ << "];\n";
- PrintAttributes(that);
- RegExpNode* successor = that->on_success();
- os_ << " n" << that << " -> n" << successor << ";\n";
- Visit(successor);
-}
-
-
-class DispatchTableDumper {
- public:
- explicit DispatchTableDumper(std::ostream& os) : os_(os) {}
- void Call(uc16 key, DispatchTable::Entry entry);
- private:
- std::ostream& os_;
-};
-
-
-void DispatchTableDumper::Call(uc16 key, DispatchTable::Entry entry) {
- os_ << "[" << AsUC16(key) << "-" << AsUC16(entry.to()) << "]: {";
- OutSet* set = entry.out_set();
- bool first = true;
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (set->Get(i)) {
- if (first) {
- first = false;
- } else {
- os_ << ", ";
- }
- os_ << i;
- }
- }
- os_ << "}\n";
-}
-
-
-void DispatchTable::Dump() {
- OFStream os(stderr);
- DispatchTableDumper dumper(os);
- tree()->ForEach(&dumper);
-}
-
-
-void RegExpEngine::DotPrint(const char* label,
- RegExpNode* node,
- bool ignore_case) {
- StdoutStream os;
- DotPrinter printer(os, ignore_case);
- printer.PrintNode(label, node);
-}
-
-
-#endif // DEBUG
-
-
-// -------------------------------------------------------------------
-// Tree to graph conversion
-
-RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<TextElement>* elms =
- new(compiler->zone()) ZoneList<TextElement>(1, compiler->zone());
- elms->Add(TextElement::Atom(this), compiler->zone());
- return new (compiler->zone())
- TextNode(elms, compiler->read_backward(), on_success);
-}
-
-
-RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return new (compiler->zone())
- TextNode(elements(), compiler->read_backward(), on_success);
-}
-
-
-static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
- const int* special_class,
- int length) {
- length--; // Remove final marker.
- DCHECK_EQ(kRangeEndMarker, special_class[length]);
- DCHECK_NE(0, ranges->length());
- DCHECK_NE(0, length);
- DCHECK_NE(0, special_class[0]);
- if (ranges->length() != (length >> 1) + 1) {
- return false;
- }
- CharacterRange range = ranges->at(0);
- if (range.from() != 0) {
- return false;
- }
- for (int i = 0; i < length; i += 2) {
- if (special_class[i] != (range.to() + 1)) {
- return false;
- }
- range = ranges->at((i >> 1) + 1);
- if (special_class[i+1] != range.from()) {
- return false;
- }
- }
- if (range.to() != String::kMaxCodePoint) {
- return false;
- }
- return true;
-}
-
-
-static bool CompareRanges(ZoneList<CharacterRange>* ranges,
- const int* special_class,
- int length) {
- length--; // Remove final marker.
- DCHECK_EQ(kRangeEndMarker, special_class[length]);
- if (ranges->length() * 2 != length) {
- return false;
- }
- for (int i = 0; i < length; i += 2) {
- CharacterRange range = ranges->at(i >> 1);
- if (range.from() != special_class[i] ||
- range.to() != special_class[i + 1] - 1) {
- return false;
- }
- }
- return true;
-}
-
-
-bool RegExpCharacterClass::is_standard(Zone* zone) {
- // TODO(lrn): Remove need for this function, by not throwing away information
- // along the way.
- if (is_negated()) {
- return false;
- }
- if (set_.is_standard()) {
- return true;
- }
- if (CompareRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
- set_.set_standard_set_type('s');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
- set_.set_standard_set_type('S');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(zone),
- kLineTerminatorRanges,
- kLineTerminatorRangeCount)) {
- set_.set_standard_set_type('.');
- return true;
- }
- if (CompareRanges(set_.ranges(zone),
- kLineTerminatorRanges,
- kLineTerminatorRangeCount)) {
- set_.set_standard_set_type('n');
- return true;
- }
- if (CompareRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
- set_.set_standard_set_type('w');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
- set_.set_standard_set_type('W');
- return true;
- }
- return false;
-}
-
-
-UnicodeRangeSplitter::UnicodeRangeSplitter(Zone* zone,
- ZoneList<CharacterRange>* base)
- : zone_(zone),
- table_(zone),
- bmp_(nullptr),
- lead_surrogates_(nullptr),
- trail_surrogates_(nullptr),
- non_bmp_(nullptr) {
- // The unicode range splitter categorizes given character ranges into:
- // - Code points from the BMP representable by one code unit.
- // - Code points outside the BMP that need to be split into surrogate pairs.
- // - Lone lead surrogates.
- // - Lone trail surrogates.
- // Lone surrogates are valid code points, even though they are not actual
- // characters. They require special matching to make sure we do not split
- // surrogate pairs. We use the dispatch table to accomplish this: the base
- // ranges are split up by the overlay ranges in the table, and the Call
- // callback is used to
- // filter and collect ranges for each category.
- for (int i = 0; i < base->length(); i++) {
- table_.AddRange(base->at(i), kBase, zone_);
- }
- // Add overlay ranges.
- table_.AddRange(CharacterRange::Range(0, kLeadSurrogateStart - 1),
- kBmpCodePoints, zone_);
- table_.AddRange(CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd),
- kLeadSurrogates, zone_);
- table_.AddRange(
- CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
- kTrailSurrogates, zone_);
- table_.AddRange(
- CharacterRange::Range(kTrailSurrogateEnd + 1, kNonBmpStart - 1),
- kBmpCodePoints, zone_);
- table_.AddRange(CharacterRange::Range(kNonBmpStart, kNonBmpEnd),
- kNonBmpCodePoints, zone_);
- table_.ForEach(this);
-}
-
-
-void UnicodeRangeSplitter::Call(uc32 from, DispatchTable::Entry entry) {
- OutSet* outset = entry.out_set();
- if (!outset->Get(kBase)) return;
- ZoneList<CharacterRange>** target = nullptr;
- if (outset->Get(kBmpCodePoints)) {
- target = &bmp_;
- } else if (outset->Get(kLeadSurrogates)) {
- target = &lead_surrogates_;
- } else if (outset->Get(kTrailSurrogates)) {
- target = &trail_surrogates_;
- } else {
- DCHECK(outset->Get(kNonBmpCodePoints));
- target = &non_bmp_;
- }
- if (*target == nullptr)
- *target = new (zone_) ZoneList<CharacterRange>(2, zone_);
- (*target)->Add(CharacterRange::Range(entry.from(), entry.to()), zone_);
-}
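
// [Editor's note] Illustrative sketch, not part of the diff: the four
// categories the splitter above separates ranges into, shown for a single
// code point. The boundary constants are the standard UTF-16 values and are
// assumptions of this sketch rather than values taken from the file.
#include <cstdint>

namespace example {

enum class Category { kBmp, kLeadSurrogate, kTrailSurrogate, kNonBmp };

Category Classify(uint32_t code_point) {
  if (code_point >= 0xD800 && code_point <= 0xDBFF)
    return Category::kLeadSurrogate;
  if (code_point >= 0xDC00 && code_point <= 0xDFFF)
    return Category::kTrailSurrogate;
  if (code_point <= 0xFFFF) return Category::kBmp;
  return Category::kNonBmp;  // 0x10000..0x10FFFF: needs a surrogate pair.
}

}  // namespace example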
-
-void AddBmpCharacters(RegExpCompiler* compiler, ChoiceNode* result,
- RegExpNode* on_success, UnicodeRangeSplitter* splitter) {
- ZoneList<CharacterRange>* bmp = splitter->bmp();
- if (bmp == nullptr) return;
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- result->AddAlternative(GuardedAlternative(TextNode::CreateForCharacterRanges(
- compiler->zone(), bmp, compiler->read_backward(), on_success,
- default_flags)));
-}
-
-void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
- RegExpNode* on_success,
- UnicodeRangeSplitter* splitter) {
- ZoneList<CharacterRange>* non_bmp = splitter->non_bmp();
- if (non_bmp == nullptr) return;
- DCHECK(!compiler->one_byte());
- Zone* zone = compiler->zone();
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- CharacterRange::Canonicalize(non_bmp);
- for (int i = 0; i < non_bmp->length(); i++) {
- // Match surrogate pair.
- // E.g. [\u10005-\u11005] becomes
- // \ud800[\udc05-\udfff]|
- // [\ud801-\ud803][\udc00-\udfff]|
- // \ud804[\udc00-\udc05]
- uc32 from = non_bmp->at(i).from();
- uc32 to = non_bmp->at(i).to();
- uc16 from_l = unibrow::Utf16::LeadSurrogate(from);
- uc16 from_t = unibrow::Utf16::TrailSurrogate(from);
- uc16 to_l = unibrow::Utf16::LeadSurrogate(to);
- uc16 to_t = unibrow::Utf16::TrailSurrogate(to);
- if (from_l == to_l) {
- // The lead surrogate is the same.
- result->AddAlternative(
- GuardedAlternative(TextNode::CreateForSurrogatePair(
- zone, CharacterRange::Singleton(from_l),
- CharacterRange::Range(from_t, to_t), compiler->read_backward(),
- on_success, default_flags)));
- } else {
- if (from_t != kTrailSurrogateStart) {
- // Add [from_l][from_t-\udfff]
- result->AddAlternative(
- GuardedAlternative(TextNode::CreateForSurrogatePair(
- zone, CharacterRange::Singleton(from_l),
- CharacterRange::Range(from_t, kTrailSurrogateEnd),
- compiler->read_backward(), on_success, default_flags)));
- from_l++;
- }
- if (to_t != kTrailSurrogateEnd) {
- // Add [to_l][\udc00-to_t]
- result->AddAlternative(
- GuardedAlternative(TextNode::CreateForSurrogatePair(
- zone, CharacterRange::Singleton(to_l),
- CharacterRange::Range(kTrailSurrogateStart, to_t),
- compiler->read_backward(), on_success, default_flags)));
- to_l--;
- }
- if (from_l <= to_l) {
- // Add [from_l-to_l][\udc00-\udfff]
- result->AddAlternative(
- GuardedAlternative(TextNode::CreateForSurrogatePair(
- zone, CharacterRange::Range(from_l, to_l),
- CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
- compiler->read_backward(), on_success, default_flags)));
- }
- }
- }
-}
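
// [Editor's note] Illustrative sketch, not part of the diff: the lead/trail
// decomposition that the surrogate-pair alternatives above are built from,
// e.g. U+10005 -> 0xD800 0xDC05 and U+11005 -> 0xD804 0xDC05, matching the
// example in the comment inside AddNonBmpSurrogatePairs. Names are
// hypothetical.
#include <cstdint>

namespace example {

struct SurrogatePair {
  uint16_t lead;
  uint16_t trail;
};

// code_point must be a supplementary code point (>= 0x10000).
SurrogatePair Decompose(uint32_t code_point) {
  uint32_t v = code_point - 0x10000;
  return {static_cast<uint16_t>(0xD800 + (v >> 10)),
          static_cast<uint16_t>(0xDC00 + (v & 0x3FF))};
}

}  // namespace example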
-
-RegExpNode* NegativeLookaroundAgainstReadDirectionAndMatch(
- RegExpCompiler* compiler, ZoneList<CharacterRange>* lookbehind,
- ZoneList<CharacterRange>* match, RegExpNode* on_success, bool read_backward,
- JSRegExp::Flags flags) {
- Zone* zone = compiler->zone();
- RegExpNode* match_node = TextNode::CreateForCharacterRanges(
- zone, match, read_backward, on_success, flags);
- int stack_register = compiler->UnicodeLookaroundStackRegister();
- int position_register = compiler->UnicodeLookaroundPositionRegister();
- RegExpLookaround::Builder lookaround(false, match_node, stack_register,
- position_register);
- RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
- zone, lookbehind, !read_backward, lookaround.on_match_success(), flags);
- return lookaround.ForMatch(negative_match);
-}
-
-RegExpNode* MatchAndNegativeLookaroundInReadDirection(
- RegExpCompiler* compiler, ZoneList<CharacterRange>* match,
- ZoneList<CharacterRange>* lookahead, RegExpNode* on_success,
- bool read_backward, JSRegExp::Flags flags) {
- Zone* zone = compiler->zone();
- int stack_register = compiler->UnicodeLookaroundStackRegister();
- int position_register = compiler->UnicodeLookaroundPositionRegister();
- RegExpLookaround::Builder lookaround(false, on_success, stack_register,
- position_register);
- RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
- zone, lookahead, read_backward, lookaround.on_match_success(), flags);
- return TextNode::CreateForCharacterRanges(
- zone, match, read_backward, lookaround.ForMatch(negative_match), flags);
-}
-
-void AddLoneLeadSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
- RegExpNode* on_success,
- UnicodeRangeSplitter* splitter) {
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- ZoneList<CharacterRange>* lead_surrogates = splitter->lead_surrogates();
- if (lead_surrogates == nullptr) return;
- Zone* zone = compiler->zone();
- // E.g. \ud801 becomes \ud801(?![\udc00-\udfff]).
- ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
- zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
-
- RegExpNode* match;
- if (compiler->read_backward()) {
- // Reading backward. Assert that reading forward, there is no trail
- // surrogate, and then backward match the lead surrogate.
- match = NegativeLookaroundAgainstReadDirectionAndMatch(
- compiler, trail_surrogates, lead_surrogates, on_success, true,
- default_flags);
- } else {
- // Reading forward. Forward match the lead surrogate and assert that
- // no trail surrogate follows.
- match = MatchAndNegativeLookaroundInReadDirection(
- compiler, lead_surrogates, trail_surrogates, on_success, false,
- default_flags);
- }
- result->AddAlternative(GuardedAlternative(match));
-}
-
-void AddLoneTrailSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
- RegExpNode* on_success,
- UnicodeRangeSplitter* splitter) {
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- ZoneList<CharacterRange>* trail_surrogates = splitter->trail_surrogates();
- if (trail_surrogates == nullptr) return;
- Zone* zone = compiler->zone();
- // E.g. \udc01 becomes (?<![\ud800-\udbff])\udc01
- ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
- zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
-
- RegExpNode* match;
- if (compiler->read_backward()) {
- // Reading backward. Backward match the trail surrogate and assert that no
- // lead surrogate precedes it.
- match = MatchAndNegativeLookaroundInReadDirection(
- compiler, trail_surrogates, lead_surrogates, on_success, true,
- default_flags);
- } else {
- // Reading forward. Assert that reading backward, there is no lead
- // surrogate, and then forward match the trail surrogate.
- match = NegativeLookaroundAgainstReadDirectionAndMatch(
- compiler, lead_surrogates, trail_surrogates, on_success, false,
- default_flags);
- }
- result->AddAlternative(GuardedAlternative(match));
-}
-
-RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- // This implements ES2015 21.2.5.2.3, AdvanceStringIndex.
- DCHECK(!compiler->read_backward());
- Zone* zone = compiler->zone();
- // Advance any character. If the character happens to be a lead surrogate and
- // we advanced into the middle of a surrogate pair, it will work out, as
- // nothing will match from there. We will have to advance again, consuming
- // the associated trail surrogate.
- ZoneList<CharacterRange>* range = CharacterRange::List(
- zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- return TextNode::CreateForCharacterRanges(zone, range, false, on_success,
- default_flags);
-}
-
-void AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges, Zone* zone) {
-#ifdef V8_INTL_SUPPORT
- DCHECK(CharacterRange::IsCanonical(ranges));
-
- // Micro-optimization to avoid passing large ranges to UnicodeSet::closeOver.
- // See also https://crbug.com/v8/6727.
- // TODO(jgruber): This only covers the special case of the {0,0x10FFFF} range,
- // which we use frequently internally. But large ranges can also easily be
- // created by the user. We might want to have a more general caching mechanism
- // for such ranges.
- if (ranges->length() == 1 && ranges->at(0).IsEverything(kNonBmpEnd)) return;
-
- // Use ICU to compute the case fold closure over the ranges.
- icu::UnicodeSet set;
- for (int i = 0; i < ranges->length(); i++) {
- set.add(ranges->at(i).from(), ranges->at(i).to());
- }
- ranges->Clear();
- set.closeOver(USET_CASE_INSENSITIVE);
- // Full case mappings map single characters to multiple characters.
- // Those are represented as strings in the set. Remove them so that
- // we end up with only simple and common case mappings.
- set.removeAllStrings();
- for (int i = 0; i < set.getRangeCount(); i++) {
- ranges->Add(CharacterRange::Range(set.getRangeStart(i), set.getRangeEnd(i)),
- zone);
- }
- // There were no errors, and everything we collected consists of ranges.
- CharacterRange::Canonicalize(ranges);
-#endif // V8_INTL_SUPPORT
-}
-
-
-RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- set_.Canonicalize();
- Zone* zone = compiler->zone();
- ZoneList<CharacterRange>* ranges = this->ranges(zone);
- if (NeedsUnicodeCaseEquivalents(flags_)) {
- AddUnicodeCaseEquivalents(ranges, zone);
- }
- if (IsUnicode(flags_) && !compiler->one_byte() &&
- !contains_split_surrogate()) {
- if (is_negated()) {
- ZoneList<CharacterRange>* negated =
- new (zone) ZoneList<CharacterRange>(2, zone);
- CharacterRange::Negate(ranges, negated, zone);
- ranges = negated;
- }
- if (ranges->length() == 0) {
- JSRegExp::Flags default_flags;
- RegExpCharacterClass* fail =
- new (zone) RegExpCharacterClass(zone, ranges, default_flags);
- return new (zone) TextNode(fail, compiler->read_backward(), on_success);
- }
- if (standard_type() == '*') {
- return UnanchoredAdvance(compiler, on_success);
- } else {
- ChoiceNode* result = new (zone) ChoiceNode(2, zone);
- UnicodeRangeSplitter splitter(zone, ranges);
- AddBmpCharacters(compiler, result, on_success, &splitter);
- AddNonBmpSurrogatePairs(compiler, result, on_success, &splitter);
- AddLoneLeadSurrogates(compiler, result, on_success, &splitter);
- AddLoneTrailSurrogates(compiler, result, on_success, &splitter);
- return result;
- }
- } else {
- return new (zone) TextNode(this, compiler->read_backward(), on_success);
- }
-}
-
-
-int CompareFirstChar(RegExpTree* const* a, RegExpTree* const* b) {
- RegExpAtom* atom1 = (*a)->AsAtom();
- RegExpAtom* atom2 = (*b)->AsAtom();
- uc16 character1 = atom1->data().at(0);
- uc16 character2 = atom2->data().at(0);
- if (character1 < character2) return -1;
- if (character1 > character2) return 1;
- return 0;
-}
-
-#ifdef V8_INTL_SUPPORT
-
-// Case-insensitive comparison.
-int CompareFirstCharCaseInsensitve(RegExpTree* const* a, RegExpTree* const* b) {
- RegExpAtom* atom1 = (*a)->AsAtom();
- RegExpAtom* atom2 = (*b)->AsAtom();
- icu::UnicodeString character1(atom1->data().at(0));
- return character1.caseCompare(atom2->data().at(0), U_FOLD_CASE_DEFAULT);
-}
-
-#else
-
-static unibrow::uchar Canonical(
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
- unibrow::uchar c) {
- unibrow::uchar chars[unibrow::Ecma262Canonicalize::kMaxWidth];
- int length = canonicalize->get(c, '\0', chars);
- DCHECK_LE(length, 1);
- unibrow::uchar canonical = c;
- if (length == 1) canonical = chars[0];
- return canonical;
-}
-
-int CompareFirstCharCaseIndependent(
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
- RegExpTree* const* a, RegExpTree* const* b) {
- RegExpAtom* atom1 = (*a)->AsAtom();
- RegExpAtom* atom2 = (*b)->AsAtom();
- unibrow::uchar character1 = atom1->data().at(0);
- unibrow::uchar character2 = atom2->data().at(0);
- if (character1 == character2) return 0;
- if (character1 >= 'a' || character2 >= 'a') {
- character1 = Canonical(canonicalize, character1);
- character2 = Canonical(canonicalize, character2);
- }
- return static_cast<int>(character1) - static_cast<int>(character2);
-}
-#endif // V8_INTL_SUPPORT
-
-// We can stable sort runs of atoms, since the order does not matter if they
-// start with different characters.
-// Returns true if any consecutive atoms were found.
-bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- int length = alternatives->length();
- bool found_consecutive_atoms = false;
- for (int i = 0; i < length; i++) {
- while (i < length) {
- RegExpTree* alternative = alternatives->at(i);
- if (alternative->IsAtom()) break;
- i++;
- }
- // i is length or it is the index of an atom.
- if (i == length) break;
- int first_atom = i;
- JSRegExp::Flags flags = alternatives->at(i)->AsAtom()->flags();
- i++;
- while (i < length) {
- RegExpTree* alternative = alternatives->at(i);
- if (!alternative->IsAtom()) break;
- if (alternative->AsAtom()->flags() != flags) break;
- i++;
- }
- // Sort atoms to get ones with common prefixes together.
-    // This step is trickier if we are in a case-independent regexp, because
-    // it would change /is|I/ to /I|is/, and order matters unless the
-    // alternatives match disjoint sets of starting points. To fix this we
-    // have a version of CompareFirstChar that compares first characters
-    // case-independently.
- DCHECK_LT(first_atom, alternatives->length());
- DCHECK_LE(i, alternatives->length());
- DCHECK_LE(first_atom, i);
- if (IgnoreCase(flags)) {
-#ifdef V8_INTL_SUPPORT
- alternatives->StableSort(CompareFirstCharCaseInsensitve, first_atom,
- i - first_atom);
-#else
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
- compiler->isolate()->regexp_macro_assembler_canonicalize();
- auto compare_closure =
- [canonicalize](RegExpTree* const* a, RegExpTree* const* b) {
- return CompareFirstCharCaseIndependent(canonicalize, a, b);
- };
- alternatives->StableSort(compare_closure, first_atom, i - first_atom);
-#endif // V8_INTL_SUPPORT
- } else {
- alternatives->StableSort(CompareFirstChar, first_atom, i - first_atom);
- }
- if (i - first_atom > 1) found_consecutive_atoms = true;
- }
- return found_consecutive_atoms;
-}
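The effect of the stable sort above is easiest to see on plain strings. A minimal illustrative sketch, not V8's API (std::string alternatives stand in for RegExpAtom nodes): sorting a run by its first character groups alternatives that share a prefix while keeping the relative order of entries that start with the same character.

#include <algorithm>
#include <string>
#include <vector>

// Illustrative only: stable-sort a run of non-empty literal alternatives by
// their first character so that entries sharing a prefix become adjacent.
void SortRunByFirstChar(std::vector<std::string>* alternatives) {
  std::stable_sort(alternatives->begin(), alternatives->end(),
                   [](const std::string& a, const std::string& b) {
                     return a[0] < b[0];
                   });
}

// {"cat", "ba", "car", "bo"} becomes {"ba", "bo", "cat", "car"}; "cat" stays
// ahead of "car" because only the first character is compared and the sort
// is stable.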
-
-
-// Optimizes ab|ac|az to a(?:b|c|z).
-void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
- Zone* zone = compiler->zone();
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- int length = alternatives->length();
-
- int write_posn = 0;
- int i = 0;
- while (i < length) {
- RegExpTree* alternative = alternatives->at(i);
- if (!alternative->IsAtom()) {
- alternatives->at(write_posn++) = alternatives->at(i);
- i++;
- continue;
- }
- RegExpAtom* const atom = alternative->AsAtom();
- JSRegExp::Flags flags = atom->flags();
-#ifdef V8_INTL_SUPPORT
- icu::UnicodeString common_prefix(atom->data().at(0));
-#else
- unibrow::uchar common_prefix = atom->data().at(0);
-#endif // V8_INTL_SUPPORT
- int first_with_prefix = i;
- int prefix_length = atom->length();
- i++;
- while (i < length) {
- alternative = alternatives->at(i);
- if (!alternative->IsAtom()) break;
- RegExpAtom* const atom = alternative->AsAtom();
- if (atom->flags() != flags) break;
-#ifdef V8_INTL_SUPPORT
- icu::UnicodeString new_prefix(atom->data().at(0));
- if (new_prefix != common_prefix) {
- if (!IgnoreCase(flags)) break;
- if (common_prefix.caseCompare(new_prefix, U_FOLD_CASE_DEFAULT) != 0)
- break;
- }
-#else
- unibrow::uchar new_prefix = atom->data().at(0);
- if (new_prefix != common_prefix) {
- if (!IgnoreCase(flags)) break;
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
- compiler->isolate()->regexp_macro_assembler_canonicalize();
- new_prefix = Canonical(canonicalize, new_prefix);
- common_prefix = Canonical(canonicalize, common_prefix);
- if (new_prefix != common_prefix) break;
- }
-#endif // V8_INTL_SUPPORT
- prefix_length = Min(prefix_length, atom->length());
- i++;
- }
- if (i > first_with_prefix + 2) {
- // Found worthwhile run of alternatives with common prefix of at least one
- // character. The sorting function above did not sort on more than one
- // character for reasons of correctness, but there may still be a longer
- // common prefix if the terms were similar or presorted in the input.
- // Find out how long the common prefix is.
- int run_length = i - first_with_prefix;
- RegExpAtom* const atom = alternatives->at(first_with_prefix)->AsAtom();
- for (int j = 1; j < run_length && prefix_length > 1; j++) {
- RegExpAtom* old_atom =
- alternatives->at(j + first_with_prefix)->AsAtom();
- for (int k = 1; k < prefix_length; k++) {
- if (atom->data().at(k) != old_atom->data().at(k)) {
- prefix_length = k;
- break;
- }
- }
- }
- RegExpAtom* prefix = new (zone)
- RegExpAtom(atom->data().SubVector(0, prefix_length), flags);
- ZoneList<RegExpTree*>* pair = new (zone) ZoneList<RegExpTree*>(2, zone);
- pair->Add(prefix, zone);
- ZoneList<RegExpTree*>* suffixes =
- new (zone) ZoneList<RegExpTree*>(run_length, zone);
- for (int j = 0; j < run_length; j++) {
- RegExpAtom* old_atom =
- alternatives->at(j + first_with_prefix)->AsAtom();
- int len = old_atom->length();
- if (len == prefix_length) {
- suffixes->Add(new (zone) RegExpEmpty(), zone);
- } else {
- RegExpTree* suffix = new (zone) RegExpAtom(
- old_atom->data().SubVector(prefix_length, old_atom->length()),
- flags);
- suffixes->Add(suffix, zone);
- }
- }
- pair->Add(new (zone) RegExpDisjunction(suffixes), zone);
- alternatives->at(write_posn++) = new (zone) RegExpAlternative(pair);
- } else {
- // Just copy any non-worthwhile alternatives.
- for (int j = first_with_prefix; j < i; j++) {
- alternatives->at(write_posn++) = alternatives->at(j);
- }
- }
- }
- alternatives->Rewind(write_posn); // Trim end of array.
-}
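As a rough illustration of the factoring performed above, here is a sketch over plain strings rather than V8's tree types (the function name and representation are assumptions for the example): find the longest prefix shared by a non-empty run of literals and rewrite the run as that prefix followed by a disjunction of the suffixes.

#include <string>
#include <vector>

// Illustrative sketch of the ab|ac|az -> a(?:b|c|z) rewrite on a non-empty
// run of literal alternatives.
std::string FactorCommonPrefix(const std::vector<std::string>& run) {
  std::string prefix = run.front();
  for (const std::string& s : run) {
    size_t k = 0;
    while (k < prefix.size() && k < s.size() && prefix[k] == s[k]) k++;
    prefix.resize(k);
  }
  std::string result = prefix + "(?:";
  for (size_t i = 0; i < run.size(); i++) {
    if (i > 0) result += "|";
    result += run[i].substr(prefix.size());  // The suffix may be empty.
  }
  return result + ")";
}

// FactorCommonPrefix({"ab", "ac", "az"}) == "a(?:b|c|z)".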
-
-
-// Optimizes b|c|z to [bcz].
-void RegExpDisjunction::FixSingleCharacterDisjunctions(
- RegExpCompiler* compiler) {
- Zone* zone = compiler->zone();
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- int length = alternatives->length();
-
- int write_posn = 0;
- int i = 0;
- while (i < length) {
- RegExpTree* alternative = alternatives->at(i);
- if (!alternative->IsAtom()) {
- alternatives->at(write_posn++) = alternatives->at(i);
- i++;
- continue;
- }
- RegExpAtom* const atom = alternative->AsAtom();
- if (atom->length() != 1) {
- alternatives->at(write_posn++) = alternatives->at(i);
- i++;
- continue;
- }
- JSRegExp::Flags flags = atom->flags();
- DCHECK_IMPLIES(IsUnicode(flags),
- !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
- bool contains_trail_surrogate =
- unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
- int first_in_run = i;
- i++;
- // Find a run of single-character atom alternatives that have identical
- // flags (case independence and unicode-ness).
- while (i < length) {
- alternative = alternatives->at(i);
- if (!alternative->IsAtom()) break;
- RegExpAtom* const atom = alternative->AsAtom();
- if (atom->length() != 1) break;
- if (atom->flags() != flags) break;
- DCHECK_IMPLIES(IsUnicode(flags),
- !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
- contains_trail_surrogate |=
- unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
- i++;
- }
- if (i > first_in_run + 1) {
- // Found non-trivial run of single-character alternatives.
- int run_length = i - first_in_run;
- ZoneList<CharacterRange>* ranges =
- new (zone) ZoneList<CharacterRange>(2, zone);
- for (int j = 0; j < run_length; j++) {
- RegExpAtom* old_atom = alternatives->at(j + first_in_run)->AsAtom();
- DCHECK_EQ(old_atom->length(), 1);
- ranges->Add(CharacterRange::Singleton(old_atom->data().at(0)), zone);
- }
- RegExpCharacterClass::CharacterClassFlags character_class_flags;
- if (IsUnicode(flags) && contains_trail_surrogate) {
- character_class_flags = RegExpCharacterClass::CONTAINS_SPLIT_SURROGATE;
- }
- alternatives->at(write_posn++) = new (zone)
- RegExpCharacterClass(zone, ranges, flags, character_class_flags);
- } else {
- // Just copy any trivial alternatives.
- for (int j = first_in_run; j < i; j++) {
- alternatives->at(write_posn++) = alternatives->at(j);
- }
- }
- }
- alternatives->Rewind(write_posn); // Trim end of array.
-}
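The single-character rewrite has an equally small sketch over plain characters (illustrative names; escaping of metacharacters is ignored here): a run of one-character alternatives collapses into one character class.

#include <string>
#include <vector>

// Illustrative sketch of b|c|z -> [bcz]: collect the single characters of a
// run into one character-class source string (no escaping of ']', '^', etc.).
std::string SingleCharsToClass(const std::vector<char>& run) {
  std::string char_class = "[";
  for (char c : run) char_class += c;
  return char_class + "]";
}

// SingleCharsToClass({'b', 'c', 'z'}) == "[bcz]".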
-
-
-RegExpNode* RegExpDisjunction::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
-
- if (alternatives->length() > 2) {
- bool found_consecutive_atoms = SortConsecutiveAtoms(compiler);
- if (found_consecutive_atoms) RationalizeConsecutiveAtoms(compiler);
- FixSingleCharacterDisjunctions(compiler);
- if (alternatives->length() == 1) {
- return alternatives->at(0)->ToNode(compiler, on_success);
- }
- }
-
- int length = alternatives->length();
-
- ChoiceNode* result =
- new(compiler->zone()) ChoiceNode(length, compiler->zone());
- for (int i = 0; i < length; i++) {
- GuardedAlternative alternative(alternatives->at(i)->ToNode(compiler,
- on_success));
- result->AddAlternative(alternative);
- }
- return result;
-}
-
-
-RegExpNode* RegExpQuantifier::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return ToNode(min(),
- max(),
- is_greedy(),
- body(),
- compiler,
- on_success);
-}
-
-
-// Scoped object to keep track of how much we unroll quantifier loops in the
-// regexp graph generator.
-class RegExpExpansionLimiter {
- public:
- static const int kMaxExpansionFactor = 6;
- RegExpExpansionLimiter(RegExpCompiler* compiler, int factor)
- : compiler_(compiler),
- saved_expansion_factor_(compiler->current_expansion_factor()),
- ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) {
- DCHECK_LT(0, factor);
- if (ok_to_expand_) {
- if (factor > kMaxExpansionFactor) {
- // Avoid integer overflow of the current expansion factor.
- ok_to_expand_ = false;
- compiler->set_current_expansion_factor(kMaxExpansionFactor + 1);
- } else {
- int new_factor = saved_expansion_factor_ * factor;
- ok_to_expand_ = (new_factor <= kMaxExpansionFactor);
- compiler->set_current_expansion_factor(new_factor);
- }
- }
- }
-
- ~RegExpExpansionLimiter() {
- compiler_->set_current_expansion_factor(saved_expansion_factor_);
- }
-
- bool ok_to_expand() { return ok_to_expand_; }
-
- private:
- RegExpCompiler* compiler_;
- int saved_expansion_factor_;
- bool ok_to_expand_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpExpansionLimiter);
-};
-
-
-RegExpNode* RegExpQuantifier::ToNode(int min,
- int max,
- bool is_greedy,
- RegExpTree* body,
- RegExpCompiler* compiler,
- RegExpNode* on_success,
- bool not_at_start) {
- // x{f, t} becomes this:
- //
- // (r++)<-.
- // | `
- // | (x)
- // v ^
- // (r=0)-->(?)---/ [if r < t]
- // |
- // [if r >= f] \----> ...
- //
-
- // 15.10.2.5 RepeatMatcher algorithm.
- // The parser has already eliminated the case where max is 0. In the case
- // where max_match is zero the parser has removed the quantifier if min was
- // > 0 and removed the atom if min was 0. See AddQuantifierToAtom.
-
- // If we know that we cannot match zero length then things are a little
- // simpler since we don't need to make the special zero length match check
- // from step 2.1. If the min and max are small we can unroll a little in
- // this case.
- static const int kMaxUnrolledMinMatches = 3; // Unroll (foo)+ and (foo){3,}
- static const int kMaxUnrolledMaxMatches = 3; // Unroll (foo)? and (foo){x,3}
- if (max == 0) return on_success; // This can happen due to recursion.
- bool body_can_be_empty = (body->min_match() == 0);
- int body_start_reg = RegExpCompiler::kNoRegister;
- Interval capture_registers = body->CaptureRegisters();
- bool needs_capture_clearing = !capture_registers.is_empty();
- Zone* zone = compiler->zone();
-
- if (body_can_be_empty) {
- body_start_reg = compiler->AllocateRegister();
- } else if (compiler->optimize() && !needs_capture_clearing) {
- // Only unroll if there are no captures and the body can't be
- // empty.
- {
- RegExpExpansionLimiter limiter(
- compiler, min + ((max != min) ? 1 : 0));
- if (min > 0 && min <= kMaxUnrolledMinMatches && limiter.ok_to_expand()) {
- int new_max = (max == kInfinity) ? max : max - min;
- // Recurse once to get the loop or optional matches after the fixed
- // ones.
- RegExpNode* answer = ToNode(
- 0, new_max, is_greedy, body, compiler, on_success, true);
- // Unroll the forced matches from 0 to min. This can cause chains of
- // TextNodes (which the parser does not generate). These should be
- // combined if it turns out they hinder good code generation.
- for (int i = 0; i < min; i++) {
- answer = body->ToNode(compiler, answer);
- }
- return answer;
- }
- }
- if (max <= kMaxUnrolledMaxMatches && min == 0) {
- DCHECK_LT(0, max); // Due to the 'if' above.
- RegExpExpansionLimiter limiter(compiler, max);
- if (limiter.ok_to_expand()) {
- // Unroll the optional matches up to max.
- RegExpNode* answer = on_success;
- for (int i = 0; i < max; i++) {
- ChoiceNode* alternation = new(zone) ChoiceNode(2, zone);
- if (is_greedy) {
- alternation->AddAlternative(
- GuardedAlternative(body->ToNode(compiler, answer)));
- alternation->AddAlternative(GuardedAlternative(on_success));
- } else {
- alternation->AddAlternative(GuardedAlternative(on_success));
- alternation->AddAlternative(
- GuardedAlternative(body->ToNode(compiler, answer)));
- }
- answer = alternation;
- if (not_at_start && !compiler->read_backward()) {
- alternation->set_not_at_start();
- }
- }
- return answer;
- }
- }
- }
- bool has_min = min > 0;
- bool has_max = max < RegExpTree::kInfinity;
- bool needs_counter = has_min || has_max;
- int reg_ctr = needs_counter
- ? compiler->AllocateRegister()
- : RegExpCompiler::kNoRegister;
- LoopChoiceNode* center = new (zone)
- LoopChoiceNode(body->min_match() == 0, compiler->read_backward(), zone);
- if (not_at_start && !compiler->read_backward()) center->set_not_at_start();
- RegExpNode* loop_return = needs_counter
- ? static_cast<RegExpNode*>(ActionNode::IncrementRegister(reg_ctr, center))
- : static_cast<RegExpNode*>(center);
- if (body_can_be_empty) {
- // If the body can be empty we need to check if it was and then
- // backtrack.
- loop_return = ActionNode::EmptyMatchCheck(body_start_reg,
- reg_ctr,
- min,
- loop_return);
- }
- RegExpNode* body_node = body->ToNode(compiler, loop_return);
- if (body_can_be_empty) {
- // If the body can be empty we need to store the start position
- // so we can bail out if it was empty.
- body_node = ActionNode::StorePosition(body_start_reg, false, body_node);
- }
- if (needs_capture_clearing) {
- // Before entering the body of this loop we need to clear captures.
- body_node = ActionNode::ClearCaptures(capture_registers, body_node);
- }
- GuardedAlternative body_alt(body_node);
- if (has_max) {
- Guard* body_guard =
- new(zone) Guard(reg_ctr, Guard::LT, max);
- body_alt.AddGuard(body_guard, zone);
- }
- GuardedAlternative rest_alt(on_success);
- if (has_min) {
- Guard* rest_guard = new(compiler->zone()) Guard(reg_ctr, Guard::GEQ, min);
- rest_alt.AddGuard(rest_guard, zone);
- }
- if (is_greedy) {
- center->AddLoopAlternative(body_alt);
- center->AddContinueAlternative(rest_alt);
- } else {
- center->AddContinueAlternative(rest_alt);
- center->AddLoopAlternative(body_alt);
- }
- if (needs_counter) {
- return ActionNode::SetRegister(reg_ctr, 0, center);
- } else {
- return center;
- }
-}
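The two unrolling paths above are easier to see written out as pattern text. A hedged sketch for the greedy, finite-max case only (plain strings stand in for graph nodes; the name is illustrative): the min forced copies are emitted in front and up to max - min optional copies nest behind them, which mirrors what the recursive ToNode call builds when the expansion limiter allows it.

#include <string>

// Illustrative only: show how a greedy x{min,max} with finite max unrolls
// into forced copies of the body followed by nested optional copies.
std::string UnrollQuantifier(const std::string& body, int min, int max) {
  std::string nested;  // Innermost-to-outermost optional copies.
  for (int i = 0; i < max - min; i++) {
    nested = "(?:" + body + nested + ")?";
  }
  std::string result;
  for (int i = 0; i < min; i++) result += body;  // Forced matches.
  return result + nested;
}

// UnrollQuantifier("foo", 2, 4) == "foofoo(?:foo(?:foo)?)?".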
-
-namespace {
-// Desugar \b to (?<=\w)(?=\W)|(?<=\W)(?=\w) and
-// \B to (?<=\w)(?=\w)|(?<=\W)(?=\W)
-RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
- RegExpNode* on_success,
- RegExpAssertion::AssertionType type,
- JSRegExp::Flags flags) {
- DCHECK(NeedsUnicodeCaseEquivalents(flags));
- Zone* zone = compiler->zone();
- ZoneList<CharacterRange>* word_range =
- new (zone) ZoneList<CharacterRange>(2, zone);
- CharacterRange::AddClassEscape('w', word_range, true, zone);
- int stack_register = compiler->UnicodeLookaroundStackRegister();
- int position_register = compiler->UnicodeLookaroundPositionRegister();
- ChoiceNode* result = new (zone) ChoiceNode(2, zone);
- // Add two choices. The (non-)boundary could start with a word or
- // a non-word-character.
- for (int i = 0; i < 2; i++) {
- bool lookbehind_for_word = i == 0;
- bool lookahead_for_word =
- (type == RegExpAssertion::BOUNDARY) ^ lookbehind_for_word;
- // Look to the left.
- RegExpLookaround::Builder lookbehind(lookbehind_for_word, on_success,
- stack_register, position_register);
- RegExpNode* backward = TextNode::CreateForCharacterRanges(
- zone, word_range, true, lookbehind.on_match_success(), flags);
- // Look to the right.
- RegExpLookaround::Builder lookahead(lookahead_for_word,
- lookbehind.ForMatch(backward),
- stack_register, position_register);
- RegExpNode* forward = TextNode::CreateForCharacterRanges(
- zone, word_range, false, lookahead.on_match_success(), flags);
- result->AddAlternative(GuardedAlternative(lookahead.ForMatch(forward)));
- }
- return result;
-}
-} // anonymous namespace
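For reference, a small sketch of the two alternatives the loop above produces, printed as source-level patterns (this assumes lookbehind syntax for readability; the real code builds nodes directly and also closes \w over unicode case equivalents).

#include <cstdio>

// Illustrative: print the desugared boundary (\b) or non-boundary (\B)
// assertion as two lookaround alternatives.
void PrintBoundaryAlternatives(bool is_boundary) {
  for (int i = 0; i < 2; i++) {
    bool lookbehind_for_word = (i == 0);
    bool lookahead_for_word = is_boundary ^ lookbehind_for_word;
    std::printf("(?<=%s)(?=%s)%s", lookbehind_for_word ? "\\w" : "\\W",
                lookahead_for_word ? "\\w" : "\\W", i == 0 ? "|" : "\n");
  }
}

// PrintBoundaryAlternatives(true)  prints (?<=\w)(?=\W)|(?<=\W)(?=\w)
// PrintBoundaryAlternatives(false) prints (?<=\w)(?=\w)|(?<=\W)(?=\W)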
-
-RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- NodeInfo info;
- Zone* zone = compiler->zone();
-
- switch (assertion_type()) {
- case START_OF_LINE:
- return AssertionNode::AfterNewline(on_success);
- case START_OF_INPUT:
- return AssertionNode::AtStart(on_success);
- case BOUNDARY:
- return NeedsUnicodeCaseEquivalents(flags_)
- ? BoundaryAssertionAsLookaround(compiler, on_success, BOUNDARY,
- flags_)
- : AssertionNode::AtBoundary(on_success);
- case NON_BOUNDARY:
- return NeedsUnicodeCaseEquivalents(flags_)
- ? BoundaryAssertionAsLookaround(compiler, on_success,
- NON_BOUNDARY, flags_)
- : AssertionNode::AtNonBoundary(on_success);
- case END_OF_INPUT:
- return AssertionNode::AtEnd(on_success);
- case END_OF_LINE: {
-      // Compile $ in multiline regexps as an alternation with a positive
-      // lookahead on one side and an end-of-input on the other side.
- // We need two registers for the lookahead.
- int stack_pointer_register = compiler->AllocateRegister();
- int position_register = compiler->AllocateRegister();
- // The ChoiceNode to distinguish between a newline and end-of-input.
- ChoiceNode* result = new(zone) ChoiceNode(2, zone);
- // Create a newline atom.
- ZoneList<CharacterRange>* newline_ranges =
- new(zone) ZoneList<CharacterRange>(3, zone);
- CharacterRange::AddClassEscape('n', newline_ranges, false, zone);
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- RegExpCharacterClass* newline_atom =
- new (zone) RegExpCharacterClass('n', default_flags);
- TextNode* newline_matcher = new (zone) TextNode(
- newline_atom, false, ActionNode::PositiveSubmatchSuccess(
- stack_pointer_register, position_register,
- 0, // No captures inside.
- -1, // Ignored if no captures.
- on_success));
- // Create an end-of-input matcher.
- RegExpNode* end_of_line = ActionNode::BeginSubmatch(
- stack_pointer_register,
- position_register,
- newline_matcher);
- // Add the two alternatives to the ChoiceNode.
- GuardedAlternative eol_alternative(end_of_line);
- result->AddAlternative(eol_alternative);
- GuardedAlternative end_alternative(AssertionNode::AtEnd(on_success));
- result->AddAlternative(end_alternative);
- return result;
- }
- default:
- UNREACHABLE();
- }
- return on_success;
-}
-
-
-RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return new (compiler->zone())
- BackReferenceNode(RegExpCapture::StartRegister(index()),
- RegExpCapture::EndRegister(index()), flags_,
- compiler->read_backward(), on_success);
-}
-
-
-RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return on_success;
-}
-
-
-RegExpLookaround::Builder::Builder(bool is_positive, RegExpNode* on_success,
- int stack_pointer_register,
- int position_register,
- int capture_register_count,
- int capture_register_start)
- : is_positive_(is_positive),
- on_success_(on_success),
- stack_pointer_register_(stack_pointer_register),
- position_register_(position_register) {
- if (is_positive_) {
- on_match_success_ = ActionNode::PositiveSubmatchSuccess(
- stack_pointer_register, position_register, capture_register_count,
- capture_register_start, on_success_);
- } else {
- Zone* zone = on_success_->zone();
- on_match_success_ = new (zone) NegativeSubmatchSuccess(
- stack_pointer_register, position_register, capture_register_count,
- capture_register_start, zone);
- }
-}
-
-
-RegExpNode* RegExpLookaround::Builder::ForMatch(RegExpNode* match) {
- if (is_positive_) {
- return ActionNode::BeginSubmatch(stack_pointer_register_,
- position_register_, match);
- } else {
- Zone* zone = on_success_->zone();
- // We use a ChoiceNode to represent the negative lookaround. The first
- // alternative is the negative match. On success, the end node backtracks.
- // On failure, the second alternative is tried and leads to success.
-    // NegativeLookaroundChoiceNode is a special ChoiceNode that ignores the
- // first exit when calculating quick checks.
- ChoiceNode* choice_node = new (zone) NegativeLookaroundChoiceNode(
- GuardedAlternative(match), GuardedAlternative(on_success_), zone);
- return ActionNode::BeginSubmatch(stack_pointer_register_,
- position_register_, choice_node);
- }
-}
-
-
-RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- int stack_pointer_register = compiler->AllocateRegister();
- int position_register = compiler->AllocateRegister();
-
- const int registers_per_capture = 2;
- const int register_of_first_capture = 2;
- int register_count = capture_count_ * registers_per_capture;
- int register_start =
- register_of_first_capture + capture_from_ * registers_per_capture;
-
- RegExpNode* result;
- bool was_reading_backward = compiler->read_backward();
- compiler->set_read_backward(type() == LOOKBEHIND);
- Builder builder(is_positive(), on_success, stack_pointer_register,
- position_register, register_count, register_start);
- RegExpNode* match = body_->ToNode(compiler, builder.on_match_success());
- result = builder.ForMatch(match);
- compiler->set_read_backward(was_reading_backward);
- return result;
-}
-
-
-RegExpNode* RegExpCapture::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return ToNode(body(), index(), compiler, on_success);
-}
-
-
-RegExpNode* RegExpCapture::ToNode(RegExpTree* body,
- int index,
- RegExpCompiler* compiler,
- RegExpNode* on_success) {
- DCHECK_NOT_NULL(body);
- int start_reg = RegExpCapture::StartRegister(index);
- int end_reg = RegExpCapture::EndRegister(index);
- if (compiler->read_backward()) std::swap(start_reg, end_reg);
- RegExpNode* store_end = ActionNode::StorePosition(end_reg, true, on_success);
- RegExpNode* body_node = body->ToNode(compiler, store_end);
- return ActionNode::StorePosition(start_reg, true, body_node);
-}
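The register numbering relied on here follows the two-registers-per-capture layout also visible in the (capture_count + 1) * 2 computation further down. A hedged sketch of that mapping (the real accessors are defined on RegExpCapture; these constexpr helpers are only illustrative):

// Sketch of the assumed layout: capture n stores its start position in one
// register and its end position in the next one.
constexpr int CaptureStartRegister(int index) { return index * 2; }
constexpr int CaptureEndRegister(int index) { return index * 2 + 1; }

static_assert(CaptureStartRegister(0) == 0, "capture 0 is the whole match");
static_assert(CaptureEndRegister(1) == 3, "capture 1 uses registers 2 and 3");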
-
-
-RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<RegExpTree*>* children = nodes();
- RegExpNode* current = on_success;
- if (compiler->read_backward()) {
- for (int i = 0; i < children->length(); i++) {
- current = children->at(i)->ToNode(compiler, current);
- }
- } else {
- for (int i = children->length() - 1; i >= 0; i--) {
- current = children->at(i)->ToNode(compiler, current);
- }
- }
- return current;
-}
-
-
-static void AddClass(const int* elmv,
- int elmc,
- ZoneList<CharacterRange>* ranges,
- Zone* zone) {
- elmc--;
- DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
- for (int i = 0; i < elmc; i += 2) {
- DCHECK(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange::Range(elmv[i], elmv[i + 1] - 1), zone);
- }
-}
-
-
-static void AddClassNegated(const int *elmv,
- int elmc,
- ZoneList<CharacterRange>* ranges,
- Zone* zone) {
- elmc--;
- DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
- DCHECK_NE(0x0000, elmv[0]);
- DCHECK_NE(String::kMaxCodePoint, elmv[elmc - 1]);
- uc16 last = 0x0000;
- for (int i = 0; i < elmc; i += 2) {
- DCHECK(last <= elmv[i] - 1);
- DCHECK(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange::Range(last, elmv[i] - 1), zone);
- last = elmv[i + 1];
- }
- ranges->Add(CharacterRange::Range(last, String::kMaxCodePoint), zone);
-}
-
-void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
- bool add_unicode_case_equivalents,
- Zone* zone) {
- if (add_unicode_case_equivalents && (type == 'w' || type == 'W')) {
- // See #sec-runtime-semantics-wordcharacters-abstract-operation
- // In case of unicode and ignore_case, we need to create the closure over
- // case equivalent characters before negating.
- ZoneList<CharacterRange>* new_ranges =
- new (zone) ZoneList<CharacterRange>(2, zone);
- AddClass(kWordRanges, kWordRangeCount, new_ranges, zone);
- AddUnicodeCaseEquivalents(new_ranges, zone);
- if (type == 'W') {
- ZoneList<CharacterRange>* negated =
- new (zone) ZoneList<CharacterRange>(2, zone);
- CharacterRange::Negate(new_ranges, negated, zone);
- new_ranges = negated;
- }
- ranges->AddAll(*new_ranges, zone);
- return;
- }
- AddClassEscape(type, ranges, zone);
-}
-
-void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
- Zone* zone) {
- switch (type) {
- case 's':
- AddClass(kSpaceRanges, kSpaceRangeCount, ranges, zone);
- break;
- case 'S':
- AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges, zone);
- break;
- case 'w':
- AddClass(kWordRanges, kWordRangeCount, ranges, zone);
- break;
- case 'W':
- AddClassNegated(kWordRanges, kWordRangeCount, ranges, zone);
- break;
- case 'd':
- AddClass(kDigitRanges, kDigitRangeCount, ranges, zone);
- break;
- case 'D':
- AddClassNegated(kDigitRanges, kDigitRangeCount, ranges, zone);
- break;
- case '.':
- AddClassNegated(kLineTerminatorRanges,
- kLineTerminatorRangeCount,
- ranges,
- zone);
- break;
- // This is not a character range as defined by the spec but a
- // convenient shorthand for a character class that matches any
- // character.
- case '*':
- ranges->Add(CharacterRange::Everything(), zone);
- break;
- // This is the set of characters matched by the $ and ^ symbols
- // in multiline mode.
- case 'n':
- AddClass(kLineTerminatorRanges,
- kLineTerminatorRangeCount,
- ranges,
- zone);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-Vector<const int> CharacterRange::GetWordBounds() {
- return Vector<const int>(kWordRanges, kWordRangeCount - 1);
-}
-
-#ifdef V8_INTL_SUPPORT
-struct IgnoreSet {
- IgnoreSet() : set(BuildIgnoreSet()) {}
- const icu::UnicodeSet set;
-};
-
-struct SpecialAddSet {
- SpecialAddSet() : set(BuildSpecialAddSet()) {}
- const icu::UnicodeSet set;
-};
-
-icu::UnicodeSet BuildAsciiAToZSet() {
- icu::UnicodeSet set('a', 'z');
- set.add('A', 'Z');
- set.freeze();
- return set;
-}
-
-struct AsciiAToZSet {
- AsciiAToZSet() : set(BuildAsciiAToZSet()) {}
- const icu::UnicodeSet set;
-};
-
-static base::LazyInstance<IgnoreSet>::type ignore_set =
- LAZY_INSTANCE_INITIALIZER;
-
-static base::LazyInstance<SpecialAddSet>::type special_add_set =
- LAZY_INSTANCE_INITIALIZER;
-
-static base::LazyInstance<AsciiAToZSet>::type ascii_a_to_z_set =
- LAZY_INSTANCE_INITIALIZER;
-#endif // V8_INTL_SUPPORT
-
-// static
-void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
- ZoneList<CharacterRange>* ranges,
- bool is_one_byte) {
- CharacterRange::Canonicalize(ranges);
- int range_count = ranges->length();
-#ifdef V8_INTL_SUPPORT
- icu::UnicodeSet others;
- for (int i = 0; i < range_count; i++) {
- CharacterRange range = ranges->at(i);
- uc32 from = range.from();
- if (from > String::kMaxUtf16CodeUnit) continue;
- uc32 to = Min(range.to(), String::kMaxUtf16CodeUnit);
- // Nothing to be done for surrogates.
- if (from >= kLeadSurrogateStart && to <= kTrailSurrogateEnd) continue;
- if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (from > String::kMaxOneByteCharCode) continue;
- if (to > String::kMaxOneByteCharCode) to = String::kMaxOneByteCharCode;
- }
- others.add(from, to);
- }
-
- // Set of characters already added to ranges that do not need to be added
- // again.
- icu::UnicodeSet already_added(others);
-
- // Set of characters in ranges that are in the 52 ASCII characters [a-zA-Z].
- icu::UnicodeSet in_ascii_a_to_z(others);
- in_ascii_a_to_z.retainAll(ascii_a_to_z_set.Pointer()->set);
-
- // Remove all chars in [a-zA-Z] from others.
- others.removeAll(in_ascii_a_to_z);
-
- // Set of characters in ranges that are overlapping with special add set.
- icu::UnicodeSet in_special_add(others);
- in_special_add.retainAll(special_add_set.Pointer()->set);
-
- others.removeAll(in_special_add);
-
- // Ignore all chars in ignore set.
- others.removeAll(ignore_set.Pointer()->set);
-
-  // For most of the chars in ranges that are still in others, find the
-  // case-equivalent set by calling closeOver(USET_CASE_INSENSITIVE).
- others.closeOver(USET_CASE_INSENSITIVE);
-
- // Because closeOver(USET_CASE_INSENSITIVE) may add ASCII [a-zA-Z] to others,
- // but ECMA262 "i" mode won't consider that, remove them from others.
-  // Ex: U+017F adds 'S' and 's' to others.
- others.removeAll(ascii_a_to_z_set.Pointer()->set);
-
- // Special handling for in_ascii_a_to_z.
- for (int32_t i = 0; i < in_ascii_a_to_z.getRangeCount(); i++) {
- UChar32 start = in_ascii_a_to_z.getRangeStart(i);
- UChar32 end = in_ascii_a_to_z.getRangeEnd(i);
-    // Bit 0x20 is set for lowercase a-z and clear for uppercase A-Z.
-    if (start & 0x0020) {
-      // The range is lowercase; add the uppercase equivalents.
-      others.add(start & 0x005F, end & 0x005F);
-    } else {
-      // The range is uppercase; add the lowercase equivalents.
-      others.add(start | 0x0020, end | 0x0020);
- }
- }
-
- // Special handling for chars in "Special Add" set.
- for (int32_t i = 0; i < in_special_add.getRangeCount(); i++) {
- UChar32 end = in_special_add.getRangeEnd(i);
- for (UChar32 ch = in_special_add.getRangeStart(i); ch <= end; ch++) {
- // Add the uppercase of this character if itself is not an uppercase
- // character.
-      // Note: The if condition cannot be u_islower(ch) because ch could be
-      // neither uppercase nor lowercase but a combining mark (Mn).
- if (!u_isupper(ch)) {
- others.add(u_toupper(ch));
- }
- icu::UnicodeSet candidates(ch, ch);
- candidates.closeOver(USET_CASE_INSENSITIVE);
- for (int32_t j = 0; j < candidates.getRangeCount(); j++) {
- UChar32 end2 = candidates.getRangeEnd(j);
- for (UChar32 ch2 = candidates.getRangeStart(j); ch2 <= end2; ch2++) {
- // Add character that is not uppercase to others.
- if (!u_isupper(ch2)) {
- others.add(ch2);
- }
- }
- }
- }
- }
-
-  // Remove all characters which are already in the ranges.
- others.removeAll(already_added);
-
- // Add others to the ranges
- for (int32_t i = 0; i < others.getRangeCount(); i++) {
- UChar32 from = others.getRangeStart(i);
- UChar32 to = others.getRangeEnd(i);
- if (from == to) {
- ranges->Add(CharacterRange::Singleton(from), zone);
- } else {
- ranges->Add(CharacterRange::Range(from, to), zone);
- }
- }
-#else
- for (int i = 0; i < range_count; i++) {
- CharacterRange range = ranges->at(i);
- uc32 bottom = range.from();
- if (bottom > String::kMaxUtf16CodeUnit) continue;
- uc32 top = Min(range.to(), String::kMaxUtf16CodeUnit);
- // Nothing to be done for surrogates.
- if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) continue;
- if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (bottom > String::kMaxOneByteCharCode) continue;
- if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
- }
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- if (top == bottom) {
- // If this is a singleton we just expand the one character.
- int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
- for (int i = 0; i < length; i++) {
- uc32 chr = chars[i];
- if (chr != bottom) {
- ranges->Add(CharacterRange::Singleton(chars[i]), zone);
- }
- }
- } else {
- // If this is a range we expand the characters block by block, expanding
- // contiguous subranges (blocks) one at a time. The approach is as
- // follows. For a given start character we look up the remainder of the
- // block that contains it (represented by the end point), for instance we
- // find 'z' if the character is 'c'. A block is characterized by the
- // property that all characters uncanonicalize in the same way, except
- // that each entry in the result is incremented by the distance from the
- // first element. So a-z is a block because 'a' uncanonicalizes to ['a',
- // 'A'] and the k'th letter uncanonicalizes to ['a' + k, 'A' + k]. Once
- // we've found the end point we look up its uncanonicalization and
- // produce a range for each element. For instance for [c-f] we look up
- // ['z', 'Z'] and produce [c-f] and [C-F]. We then only add a range if
- // it is not already contained in the input, so [c-f] will be skipped but
- // [C-F] will be added. If this range is not completely contained in a
- // block we do this for all the blocks covered by the range (handling
-      // characters that are not in a block as a "singleton block").
- unibrow::uchar equivalents[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int pos = bottom;
- while (pos <= top) {
- int length =
- isolate->jsregexp_canonrange()->get(pos, '\0', equivalents);
- uc32 block_end;
- if (length == 0) {
- block_end = pos;
- } else {
- DCHECK_EQ(1, length);
- block_end = equivalents[0];
- }
- int end = (block_end > top) ? top : block_end;
- length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0',
- equivalents);
- for (int i = 0; i < length; i++) {
- uc32 c = equivalents[i];
- uc32 range_from = c - (block_end - pos);
- uc32 range_to = c - (block_end - end);
- if (!(bottom <= range_from && range_to <= top)) {
- ranges->Add(CharacterRange::Range(range_from, range_to), zone);
- }
- }
- pos = end + 1;
- }
- }
- }
-#endif // V8_INTL_SUPPORT
-}
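A hedged illustration of the block expansion described in the long comment above, restricted to ASCII and using <cctype> instead of the unibrow tables (so it only models the simplest case): a range that lies entirely in one case block maps to the same span shifted into the other case.

#include <cctype>
#include <utility>
#include <vector>

// Illustrative only: the ASCII case-equivalent span of [from, to], mirroring
// how a block like [c-f] expands to [C-F] above.
std::vector<std::pair<char, char>> AsciiCaseEquivalents(char from, char to) {
  std::vector<std::pair<char, char>> result;
  if (std::islower(static_cast<unsigned char>(from)) &&
      std::islower(static_cast<unsigned char>(to))) {
    result.emplace_back(
        static_cast<char>(std::toupper(static_cast<unsigned char>(from))),
        static_cast<char>(std::toupper(static_cast<unsigned char>(to))));
  } else if (std::isupper(static_cast<unsigned char>(from)) &&
             std::isupper(static_cast<unsigned char>(to))) {
    result.emplace_back(
        static_cast<char>(std::tolower(static_cast<unsigned char>(from))),
        static_cast<char>(std::tolower(static_cast<unsigned char>(to))));
  }
  return result;
}

// AsciiCaseEquivalents('c', 'f') yields {{'C', 'F'}}.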
-
-bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
- DCHECK_NOT_NULL(ranges);
- int n = ranges->length();
- if (n <= 1) return true;
- int max = ranges->at(0).to();
- for (int i = 1; i < n; i++) {
- CharacterRange next_range = ranges->at(i);
- if (next_range.from() <= max + 1) return false;
- max = next_range.to();
- }
- return true;
-}
-
-
-ZoneList<CharacterRange>* CharacterSet::ranges(Zone* zone) {
- if (ranges_ == nullptr) {
- ranges_ = new(zone) ZoneList<CharacterRange>(2, zone);
- CharacterRange::AddClassEscape(standard_set_type_, ranges_, false, zone);
- }
- return ranges_;
-}
-
-
-// Move a number of elements in a zonelist to another position
-// in the same list. Handles overlapping source and target areas.
-static void MoveRanges(ZoneList<CharacterRange>* list,
- int from,
- int to,
- int count) {
- // Ranges are potentially overlapping.
- if (from < to) {
- for (int i = count - 1; i >= 0; i--) {
- list->at(to + i) = list->at(from + i);
- }
- } else {
- for (int i = 0; i < count; i++) {
- list->at(to + i) = list->at(from + i);
- }
- }
-}
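The direction check above is the same trick memmove uses. A standalone sketch on a plain vector (illustrative name) shows why the copy order matters when the source and destination regions overlap.

#include <vector>

// Illustrative sketch of MoveRanges: copy 'count' elements from index 'from'
// to index 'to' within one vector, choosing the copy direction so an
// overlapping destination never overwrites elements that still need to be
// read.
template <typename T>
void MoveWithinVector(std::vector<T>* list, int from, int to, int count) {
  if (from < to) {
    for (int i = count - 1; i >= 0; i--) (*list)[to + i] = (*list)[from + i];
  } else {
    for (int i = 0; i < count; i++) (*list)[to + i] = (*list)[from + i];
  }
}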
-
-
-static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
- int count,
- CharacterRange insert) {
- // Inserts a range into list[0..count[, which must be sorted
- // by from value and non-overlapping and non-adjacent, using at most
- // list[0..count] for the result. Returns the number of resulting
- // canonicalized ranges. Inserting a range may collapse existing ranges into
- // fewer ranges, so the return value can be anything in the range 1..count+1.
- uc32 from = insert.from();
- uc32 to = insert.to();
- int start_pos = 0;
- int end_pos = count;
- for (int i = count - 1; i >= 0; i--) {
- CharacterRange current = list->at(i);
- if (current.from() > to + 1) {
- end_pos = i;
- } else if (current.to() + 1 < from) {
- start_pos = i + 1;
- break;
- }
- }
-
- // Inserted range overlaps, or is adjacent to, ranges at positions
- // [start_pos..end_pos[. Ranges before start_pos or at or after end_pos are
- // not affected by the insertion.
- // If start_pos == end_pos, the range must be inserted before start_pos.
-  // If start_pos < end_pos, the entire range from start_pos to end_pos
- // must be merged with the insert range.
-
- if (start_pos == end_pos) {
- // Insert between existing ranges at position start_pos.
- if (start_pos < count) {
- MoveRanges(list, start_pos, start_pos + 1, count - start_pos);
- }
- list->at(start_pos) = insert;
- return count + 1;
- }
- if (start_pos + 1 == end_pos) {
- // Replace single existing range at position start_pos.
- CharacterRange to_replace = list->at(start_pos);
- int new_from = Min(to_replace.from(), from);
- int new_to = Max(to_replace.to(), to);
- list->at(start_pos) = CharacterRange::Range(new_from, new_to);
- return count;
- }
- // Replace a number of existing ranges from start_pos to end_pos - 1.
- // Move the remaining ranges down.
-
- int new_from = Min(list->at(start_pos).from(), from);
- int new_to = Max(list->at(end_pos - 1).to(), to);
- if (end_pos < count) {
- MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
- }
- list->at(start_pos) = CharacterRange::Range(new_from, new_to);
- return count - (end_pos - start_pos) + 1;
-}
-
-
-void CharacterSet::Canonicalize() {
- // Special/default classes are always considered canonical. The result
- // of calling ranges() will be sorted.
- if (ranges_ == nullptr) return;
- CharacterRange::Canonicalize(ranges_);
-}
-
-
-void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
- if (character_ranges->length() <= 1) return;
- // Check whether ranges are already canonical (increasing, non-overlapping,
- // non-adjacent).
- int n = character_ranges->length();
- int max = character_ranges->at(0).to();
- int i = 1;
- while (i < n) {
- CharacterRange current = character_ranges->at(i);
- if (current.from() <= max + 1) {
- break;
- }
- max = current.to();
- i++;
- }
- // Canonical until the i'th range. If that's all of them, we are done.
- if (i == n) return;
-
- // The ranges at index i and forward are not canonicalized. Make them so by
- // doing the equivalent of insertion sort (inserting each into the previous
- // list, in order).
- // Notice that inserting a range can reduce the number of ranges in the
- // result due to combining of adjacent and overlapping ranges.
- int read = i; // Range to insert.
- int num_canonical = i; // Length of canonicalized part of list.
- do {
- num_canonical = InsertRangeInCanonicalList(character_ranges,
- num_canonical,
- character_ranges->at(read));
- read++;
- } while (read < n);
- character_ranges->Rewind(num_canonical);
-
- DCHECK(CharacterRange::IsCanonical(character_ranges));
-}
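A sketch of the canonical form this produces, on plain integer pairs rather than CharacterRange (illustrative): overlapping and adjacent ranges merge and the result is sorted by start. The real code avoids the full sort when a prefix of the list is already canonical.

#include <algorithm>
#include <utility>
#include <vector>

// Illustrative sketch of canonicalization: sort by start, then merge ranges
// that overlap or are adjacent (from <= previous to + 1).
std::vector<std::pair<int, int>> CanonicalizeRanges(
    std::vector<std::pair<int, int>> ranges) {
  std::sort(ranges.begin(), ranges.end());
  std::vector<std::pair<int, int>> result;
  for (const auto& range : ranges) {
    if (!result.empty() && range.first <= result.back().second + 1) {
      result.back().second = std::max(result.back().second, range.second);
    } else {
      result.push_back(range);
    }
  }
  return result;
}

// CanonicalizeRanges({{5, 10}, {1, 3}, {9, 12}}) == {{1, 3}, {5, 12}}.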
-
-
-void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
- ZoneList<CharacterRange>* negated_ranges,
- Zone* zone) {
- DCHECK(CharacterRange::IsCanonical(ranges));
- DCHECK_EQ(0, negated_ranges->length());
- int range_count = ranges->length();
- uc32 from = 0;
- int i = 0;
- if (range_count > 0 && ranges->at(0).from() == 0) {
- from = ranges->at(0).to() + 1;
- i = 1;
- }
- while (i < range_count) {
- CharacterRange range = ranges->at(i);
- negated_ranges->Add(CharacterRange::Range(from, range.from() - 1), zone);
- from = range.to() + 1;
- i++;
- }
- if (from < String::kMaxCodePoint) {
- negated_ranges->Add(CharacterRange::Range(from, String::kMaxCodePoint),
- zone);
- }
-}
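On a canonical list the negation above is a single sweep over the gaps. A sketch over integer pairs with an explicit maximum code point (names are illustrative):

#include <utility>
#include <vector>

// Illustrative sketch of negating a canonical range list: emit the gap before
// each range and the trailing gap up to the maximum code point.
std::vector<std::pair<int, int>> NegateRanges(
    const std::vector<std::pair<int, int>>& ranges, int max_code_point) {
  std::vector<std::pair<int, int>> negated;
  int from = 0;
  for (const auto& range : ranges) {
    if (range.first > from) negated.emplace_back(from, range.first - 1);
    from = range.second + 1;
  }
  if (from <= max_code_point) negated.emplace_back(from, max_code_point);
  return negated;
}

// NegateRanges({{'a', 'z'}}, 0x10FFFF) == {{0, 'a' - 1}, {'z' + 1, 0x10FFFF}}.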
-
-
-// -------------------------------------------------------------------
-// Splay tree
-
-
-OutSet* OutSet::Extend(unsigned value, Zone* zone) {
- if (Get(value))
- return this;
- if (successors(zone) != nullptr) {
- for (int i = 0; i < successors(zone)->length(); i++) {
- OutSet* successor = successors(zone)->at(i);
- if (successor->Get(value))
- return successor;
- }
- } else {
- successors_ = new(zone) ZoneList<OutSet*>(2, zone);
- }
- OutSet* result = new(zone) OutSet(first_, remaining_);
- result->Set(value, zone);
- successors(zone)->Add(result, zone);
- return result;
-}
-
-
-void OutSet::Set(unsigned value, Zone *zone) {
- if (value < kFirstLimit) {
- first_ |= (1 << value);
- } else {
- if (remaining_ == nullptr)
- remaining_ = new(zone) ZoneList<unsigned>(1, zone);
- if (remaining_->is_empty() || !remaining_->Contains(value))
- remaining_->Add(value, zone);
- }
-}
-
-
-bool OutSet::Get(unsigned value) const {
- if (value < kFirstLimit) {
- return (first_ & (1 << value)) != 0;
- } else if (remaining_ == nullptr) {
- return false;
- } else {
- return remaining_->Contains(value);
- }
-}
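Extend, Set and Get above rely on a small-set representation. A hedged sketch of the same idea with illustrative names and limit (a bitmask word for small values, a spill list for the rest):

#include <cstdint>
#include <vector>

// Illustrative sketch of the OutSet representation: values below a small
// limit live in one bitmask word, everything else goes into an overflow list.
class SmallUnsignedSet {
 public:
  static const unsigned kFirstLimit = 32;  // Illustrative limit.

  void Set(unsigned value) {
    if (value < kFirstLimit) {
      first_ |= (uint32_t{1} << value);
    } else if (!Get(value)) {
      remaining_.push_back(value);
    }
  }

  bool Get(unsigned value) const {
    if (value < kFirstLimit) return (first_ & (uint32_t{1} << value)) != 0;
    for (unsigned v : remaining_) {
      if (v == value) return true;
    }
    return false;
  }

 private:
  uint32_t first_ = 0;
  std::vector<unsigned> remaining_;
};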
-
-
-const uc32 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
-
-
-void DispatchTable::AddRange(CharacterRange full_range, int value,
- Zone* zone) {
- CharacterRange current = full_range;
- if (tree()->is_empty()) {
- // If this is the first range we just insert into the table.
- ZoneSplayTree<Config>::Locator loc;
- bool inserted = tree()->Insert(current.from(), &loc);
- DCHECK(inserted);
- USE(inserted);
- loc.set_value(Entry(current.from(), current.to(),
- empty()->Extend(value, zone)));
- return;
- }
- // First see if there is a range to the left of this one that
- // overlaps.
- ZoneSplayTree<Config>::Locator loc;
- if (tree()->FindGreatestLessThan(current.from(), &loc)) {
- Entry* entry = &loc.value();
- // If we've found a range that overlaps with this one, and it
- // starts strictly to the left of this one, we have to fix it
- // because the following code only handles ranges that start on
- // or after the start point of the range we're adding.
- if (entry->from() < current.from() && entry->to() >= current.from()) {
- // Snap the overlapping range in half around the start point of
- // the range we're adding.
- CharacterRange left =
- CharacterRange::Range(entry->from(), current.from() - 1);
- CharacterRange right = CharacterRange::Range(current.from(), entry->to());
- // The left part of the overlapping range doesn't overlap.
- // Truncate the whole entry to be just the left part.
- entry->set_to(left.to());
- // The right part is the one that overlaps. We add this part
- // to the map and let the next step deal with merging it with
- // the range we're adding.
- ZoneSplayTree<Config>::Locator loc;
- bool inserted = tree()->Insert(right.from(), &loc);
- DCHECK(inserted);
- USE(inserted);
- loc.set_value(Entry(right.from(),
- right.to(),
- entry->out_set()));
- }
- }
- while (current.is_valid()) {
- if (tree()->FindLeastGreaterThan(current.from(), &loc) &&
- (loc.value().from() <= current.to()) &&
- (loc.value().to() >= current.from())) {
- Entry* entry = &loc.value();
- // We have overlap. If there is space between the start point of
- // the range we're adding and where the overlapping range starts
- // then we have to add a range covering just that space.
- if (current.from() < entry->from()) {
- ZoneSplayTree<Config>::Locator ins;
- bool inserted = tree()->Insert(current.from(), &ins);
- DCHECK(inserted);
- USE(inserted);
- ins.set_value(Entry(current.from(),
- entry->from() - 1,
- empty()->Extend(value, zone)));
- current.set_from(entry->from());
- }
- DCHECK_EQ(current.from(), entry->from());
- // If the overlapping range extends beyond the one we want to add
- // we have to snap the right part off and add it separately.
- if (entry->to() > current.to()) {
- ZoneSplayTree<Config>::Locator ins;
- bool inserted = tree()->Insert(current.to() + 1, &ins);
- DCHECK(inserted);
- USE(inserted);
- ins.set_value(Entry(current.to() + 1,
- entry->to(),
- entry->out_set()));
- entry->set_to(current.to());
- }
- DCHECK(entry->to() <= current.to());
- // The overlapping range is now completely contained by the range
- // we're adding so we can just update it and move the start point
- // of the range we're adding just past it.
- entry->AddValue(value, zone);
- DCHECK(entry->to() + 1 > current.from());
- current.set_from(entry->to() + 1);
- } else {
- // There is no overlap so we can just add the range
- ZoneSplayTree<Config>::Locator ins;
- bool inserted = tree()->Insert(current.from(), &ins);
- DCHECK(inserted);
- USE(inserted);
- ins.set_value(Entry(current.from(),
- current.to(),
- empty()->Extend(value, zone)));
- break;
- }
- }
-}
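The overlap handling above is easier to follow on a flat structure. A hedged sketch over a sorted vector of disjoint entries instead of the splay tree (names and types are illustrative): overlapping parts of an existing entry keep their old values and also gain the new one, while uncovered parts of the new range get an entry with just the new value.

#include <algorithm>
#include <set>
#include <vector>

// Illustrative flat version of DispatchTable::AddRange.
struct FlatEntry {
  int from;
  int to;
  std::set<int> values;
};

void AddRangeFlat(std::vector<FlatEntry>* table, int from, int to, int value) {
  std::vector<FlatEntry> result;
  int pos = from;  // Next uncovered code point of the new range.
  for (const FlatEntry& entry : *table) {
    // Part of the new range that lies before this entry.
    if (pos <= to && pos < entry.from) {
      result.push_back({pos, std::min(to, entry.from - 1), {value}});
      pos = entry.from;
    }
    int lo = std::max(entry.from, from);
    int hi = std::min(entry.to, to);
    if (lo > hi) {  // No overlap with the new range.
      result.push_back(entry);
      continue;
    }
    if (entry.from < lo) result.push_back({entry.from, lo - 1, entry.values});
    FlatEntry middle{lo, hi, entry.values};
    middle.values.insert(value);
    result.push_back(middle);
    if (hi < entry.to) result.push_back({hi + 1, entry.to, entry.values});
    pos = hi + 1;
  }
  if (pos <= to) result.push_back({pos, to, {value}});
  *table = std::move(result);
}

// Adding [5, 15] with value 2 to {[0, 10] -> {1}} yields
// [0, 4] -> {1}, [5, 10] -> {1, 2}, [11, 15] -> {2}.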
-
-
-OutSet* DispatchTable::Get(uc32 value) {
- ZoneSplayTree<Config>::Locator loc;
- if (!tree()->FindGreatestLessThan(value, &loc))
- return empty();
- Entry* entry = &loc.value();
- if (value <= entry->to())
- return entry->out_set();
- else
- return empty();
-}
-
-
-// -------------------------------------------------------------------
-// Analysis
-
-
-void Analysis::EnsureAnalyzed(RegExpNode* that) {
- StackLimitCheck check(isolate());
- if (check.HasOverflowed()) {
- fail("Stack overflow");
- return;
- }
- if (that->info()->been_analyzed || that->info()->being_analyzed)
- return;
- that->info()->being_analyzed = true;
- that->Accept(this);
- that->info()->being_analyzed = false;
- that->info()->been_analyzed = true;
-}
-
-
-void Analysis::VisitEnd(EndNode* that) {
- // nothing to do
-}
-
-
-void TextNode::CalculateOffsets() {
- int element_count = elements()->length();
- // Set up the offsets of the elements relative to the start. This is a fixed
- // quantity since a TextNode can only contain fixed-width things.
- int cp_offset = 0;
- for (int i = 0; i < element_count; i++) {
- TextElement& elm = elements()->at(i);
- elm.set_cp_offset(cp_offset);
- cp_offset += elm.length();
- }
-}
-
-
-void Analysis::VisitText(TextNode* that) {
- that->MakeCaseIndependent(isolate(), is_one_byte_);
- EnsureAnalyzed(that->on_success());
- if (!has_failed()) {
- that->CalculateOffsets();
- }
-}
-
-
-void Analysis::VisitAction(ActionNode* that) {
- RegExpNode* target = that->on_success();
- EnsureAnalyzed(target);
- if (!has_failed()) {
- // If the next node is interested in what it follows then this node
- // has to be interested too so it can pass the information on.
- that->info()->AddFromFollowing(target->info());
- }
-}
-
-
-void Analysis::VisitChoice(ChoiceNode* that) {
- NodeInfo* info = that->info();
- for (int i = 0; i < that->alternatives()->length(); i++) {
- RegExpNode* node = that->alternatives()->at(i).node();
- EnsureAnalyzed(node);
- if (has_failed()) return;
- // Anything the following nodes need to know has to be known by
- // this node also, so it can pass it on.
- info->AddFromFollowing(node->info());
- }
-}
-
-
-void Analysis::VisitLoopChoice(LoopChoiceNode* that) {
- NodeInfo* info = that->info();
- for (int i = 0; i < that->alternatives()->length(); i++) {
- RegExpNode* node = that->alternatives()->at(i).node();
- if (node != that->loop_node()) {
- EnsureAnalyzed(node);
- if (has_failed()) return;
- info->AddFromFollowing(node->info());
- }
- }
- // Check the loop last since it may need the value of this node
- // to get a correct result.
- EnsureAnalyzed(that->loop_node());
- if (!has_failed()) {
- info->AddFromFollowing(that->loop_node()->info());
- }
-}
-
-
-void Analysis::VisitBackReference(BackReferenceNode* that) {
- EnsureAnalyzed(that->on_success());
-}
-
-
-void Analysis::VisitAssertion(AssertionNode* that) {
- EnsureAnalyzed(that->on_success());
-}
-
-
-void BackReferenceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- // Working out the set of characters that a backreference can match is too
- // hard, so we just say that any character can match.
- bm->SetRest(offset);
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
- RegExpMacroAssembler::kTableSize);
-
-
-void ChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- ZoneList<GuardedAlternative>* alts = alternatives();
- budget = (budget - 1) / alts->length();
- for (int i = 0; i < alts->length(); i++) {
- GuardedAlternative& alt = alts->at(i);
- if (alt.guards() != nullptr && alt.guards()->length() != 0) {
- bm->SetRest(offset); // Give up trying to fill in info.
- SaveBMInfo(bm, not_at_start, offset);
- return;
- }
- alt.node()->FillInBMInfo(isolate, offset, budget, bm, not_at_start);
- }
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- if (initial_offset >= bm->length()) return;
- int offset = initial_offset;
- int max_char = bm->max_char();
- for (int i = 0; i < elements()->length(); i++) {
- if (offset >= bm->length()) {
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
- return;
- }
- TextElement text = elements()->at(i);
- if (text.text_type() == TextElement::ATOM) {
- RegExpAtom* atom = text.atom();
- for (int j = 0; j < atom->length(); j++, offset++) {
- if (offset >= bm->length()) {
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
- return;
- }
- uc16 character = atom->data()[j];
- if (IgnoreCase(atom->flags())) {
- unibrow::uchar chars[4];
- int length = GetCaseIndependentLetters(
- isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
- chars, 4);
- for (int j = 0; j < length; j++) {
- bm->Set(offset, chars[j]);
- }
- } else {
- if (character <= max_char) bm->Set(offset, character);
- }
- }
- } else {
- DCHECK_EQ(TextElement::CHAR_CLASS, text.text_type());
- RegExpCharacterClass* char_class = text.char_class();
- ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
- if (char_class->is_negated()) {
- bm->SetAll(offset);
- } else {
- for (int k = 0; k < ranges->length(); k++) {
- CharacterRange& range = ranges->at(k);
- if (range.from() > max_char) continue;
- int to = Min(max_char, static_cast<int>(range.to()));
- bm->SetInterval(offset, Interval(range.from(), to));
- }
- }
- offset++;
- }
- }
- if (offset >= bm->length()) {
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
- return;
- }
- on_success()->FillInBMInfo(isolate, offset, budget - 1, bm,
- true); // Not at start after a text node.
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
-}
-
-
-// -------------------------------------------------------------------
-// Dispatch table construction
-
-
-void DispatchTableConstructor::VisitEnd(EndNode* that) {
- AddRange(CharacterRange::Everything());
-}
-
-
-void DispatchTableConstructor::BuildTable(ChoiceNode* node) {
- node->set_being_calculated(true);
- ZoneList<GuardedAlternative>* alternatives = node->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- set_choice_index(i);
- alternatives->at(i).node()->Accept(this);
- }
- node->set_being_calculated(false);
-}
-
-
-class AddDispatchRange {
- public:
- explicit AddDispatchRange(DispatchTableConstructor* constructor)
- : constructor_(constructor) { }
- void Call(uc32 from, DispatchTable::Entry entry);
- private:
- DispatchTableConstructor* constructor_;
-};
-
-
-void AddDispatchRange::Call(uc32 from, DispatchTable::Entry entry) {
- constructor_->AddRange(CharacterRange::Range(from, entry.to()));
-}
-
-
-void DispatchTableConstructor::VisitChoice(ChoiceNode* node) {
- if (node->being_calculated())
- return;
- DispatchTable* table = node->GetTable(ignore_case_);
- AddDispatchRange adder(this);
- table->ForEach(&adder);
-}
-
-
-void DispatchTableConstructor::VisitBackReference(BackReferenceNode* that) {
- // TODO(160): Find the node that we refer back to and propagate its start
- // set back to here. For now we just accept anything.
- AddRange(CharacterRange::Everything());
-}
-
-
-void DispatchTableConstructor::VisitAssertion(AssertionNode* that) {
- RegExpNode* target = that->on_success();
- target->Accept(this);
-}
-
-
-static int CompareRangeByFrom(const CharacterRange* a,
- const CharacterRange* b) {
- return Compare<uc16>(a->from(), b->from());
-}
-
-
-void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
- ranges->Sort(CompareRangeByFrom);
- uc16 last = 0;
- for (int i = 0; i < ranges->length(); i++) {
- CharacterRange range = ranges->at(i);
- if (last < range.from())
- AddRange(CharacterRange::Range(last, range.from() - 1));
- if (range.to() >= last) {
- if (range.to() == String::kMaxCodePoint) {
- return;
- } else {
- last = range.to() + 1;
- }
- }
- }
- AddRange(CharacterRange::Range(last, String::kMaxCodePoint));
-}
-
-
-void DispatchTableConstructor::VisitText(TextNode* that) {
- TextElement elm = that->elements()->at(0);
- switch (elm.text_type()) {
- case TextElement::ATOM: {
- uc16 c = elm.atom()->data()[0];
- AddRange(CharacterRange::Range(c, c));
- break;
- }
- case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* tree = elm.char_class();
- ZoneList<CharacterRange>* ranges = tree->ranges(that->zone());
- if (tree->is_negated()) {
- AddInverse(ranges);
- } else {
- for (int i = 0; i < ranges->length(); i++)
- AddRange(ranges->at(i));
- }
- break;
- }
- default: {
- UNIMPLEMENTED();
- }
- }
-}
-
-
-void DispatchTableConstructor::VisitAction(ActionNode* that) {
- RegExpNode* target = that->on_success();
- target->Accept(this);
-}
-
-RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpCompiler* compiler,
- RegExpNode* on_success,
- JSRegExp::Flags flags) {
- // If the regexp matching starts within a surrogate pair, step back
- // to the lead surrogate and start matching from there.
- DCHECK(!compiler->read_backward());
- Zone* zone = compiler->zone();
- ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
- zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
- ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
- zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
-
- ChoiceNode* optional_step_back = new (zone) ChoiceNode(2, zone);
-
- int stack_register = compiler->UnicodeLookaroundStackRegister();
- int position_register = compiler->UnicodeLookaroundPositionRegister();
- RegExpNode* step_back = TextNode::CreateForCharacterRanges(
- zone, lead_surrogates, true, on_success, flags);
- RegExpLookaround::Builder builder(true, step_back, stack_register,
- position_register);
- RegExpNode* match_trail = TextNode::CreateForCharacterRanges(
- zone, trail_surrogates, false, builder.on_match_success(), flags);
-
- optional_step_back->AddAlternative(
- GuardedAlternative(builder.ForMatch(match_trail)));
- optional_step_back->AddAlternative(GuardedAlternative(on_success));
-
- return optional_step_back;
-}
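The construction above amounts to the following adjustment at match start. A hedged standalone sketch using the standard UTF-16 surrogate ranges (names and the flat subject representation are illustrative): if the starting position sits on a trail surrogate preceded by a lead surrogate, matching begins one code unit earlier.

#include <cstdint>

// Standard UTF-16 surrogate tests matching the ranges used above.
constexpr bool IsLeadSurrogate(uint16_t code_unit) {
  return code_unit >= 0xD800 && code_unit <= 0xDBFF;
}
constexpr bool IsTrailSurrogate(uint16_t code_unit) {
  return code_unit >= 0xDC00 && code_unit <= 0xDFFF;
}

// Illustrative: step back over the lead surrogate of a split pair.
inline int AdjustedStart(const uint16_t* subject, int pos) {
  if (pos > 0 && IsTrailSurrogate(subject[pos]) &&
      IsLeadSurrogate(subject[pos - 1])) {
    return pos - 1;
  }
  return pos;
}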
-
-
-RegExpEngine::CompilationResult RegExpEngine::Compile(
- Isolate* isolate, Zone* zone, RegExpCompileData* data,
- JSRegExp::Flags flags, Handle<String> pattern,
- Handle<String> sample_subject, bool is_one_byte) {
- if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
- return IrregexpRegExpTooBig(isolate);
- }
- bool is_sticky = IsSticky(flags);
- bool is_global = IsGlobal(flags);
- bool is_unicode = IsUnicode(flags);
- RegExpCompiler compiler(isolate, zone, data->capture_count, is_one_byte);
-
- if (compiler.optimize())
- compiler.set_optimize(!TooMuchRegExpCode(isolate, pattern));
-
- // Sample some characters from the middle of the string.
- static const int kSampleSize = 128;
-
- sample_subject = String::Flatten(isolate, sample_subject);
- int chars_sampled = 0;
- int half_way = (sample_subject->length() - kSampleSize) / 2;
- for (int i = Max(0, half_way);
- i < sample_subject->length() && chars_sampled < kSampleSize;
- i++, chars_sampled++) {
- compiler.frequency_collator()->CountCharacter(sample_subject->Get(i));
- }
-
- // Wrap the body of the regexp in capture #0.
- RegExpNode* captured_body = RegExpCapture::ToNode(data->tree,
- 0,
- &compiler,
- compiler.accept());
- RegExpNode* node = captured_body;
- bool is_end_anchored = data->tree->IsAnchoredAtEnd();
- bool is_start_anchored = data->tree->IsAnchoredAtStart();
- int max_length = data->tree->max_match();
- if (!is_start_anchored && !is_sticky) {
- // Add a .*? at the beginning, outside the body capture, unless
- // this expression is anchored at the beginning or sticky.
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- RegExpNode* loop_node = RegExpQuantifier::ToNode(
- 0, RegExpTree::kInfinity, false,
- new (zone) RegExpCharacterClass('*', default_flags), &compiler,
- captured_body, data->contains_anchor);
-
- if (data->contains_anchor) {
-      // Unroll the loop once, to take care of matches that might start
-      // at the start of the input.
- ChoiceNode* first_step_node = new(zone) ChoiceNode(2, zone);
- first_step_node->AddAlternative(GuardedAlternative(captured_body));
- first_step_node->AddAlternative(GuardedAlternative(new (zone) TextNode(
- new (zone) RegExpCharacterClass('*', default_flags), false,
- loop_node)));
- node = first_step_node;
- } else {
- node = loop_node;
- }
- }
- if (is_one_byte) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
- // Do it again to propagate the new nodes to places where they were not
- // put because they had not been calculated yet.
- if (node != nullptr) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
- }
- } else if (is_unicode && (is_global || is_sticky)) {
- node = OptionallyStepBackToLeadSurrogate(&compiler, node, flags);
- }
-
- if (node == nullptr) node = new (zone) EndNode(EndNode::BACKTRACK, zone);
- data->node = node;
- Analysis analysis(isolate, is_one_byte);
- analysis.EnsureAnalyzed(node);
- if (analysis.has_failed()) {
- const char* error_message = analysis.error_message();
- return CompilationResult(isolate, error_message);
- }
-
- // Create the correct assembler for the architecture.
- std::unique_ptr<RegExpMacroAssembler> macro_assembler;
- if (!FLAG_regexp_interpret_all) {
- // Native regexp implementation.
- DCHECK(!FLAG_jitless);
-
- NativeRegExpMacroAssembler::Mode mode =
- is_one_byte ? NativeRegExpMacroAssembler::LATIN1
- : NativeRegExpMacroAssembler::UC16;
-
-#if V8_TARGET_ARCH_IA32
- macro_assembler.reset(new RegExpMacroAssemblerIA32(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_X64
- macro_assembler.reset(new RegExpMacroAssemblerX64(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_ARM
- macro_assembler.reset(new RegExpMacroAssemblerARM(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_ARM64
- macro_assembler.reset(new RegExpMacroAssemblerARM64(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_S390
- macro_assembler.reset(new RegExpMacroAssemblerS390(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_PPC
- macro_assembler.reset(new RegExpMacroAssemblerPPC(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_MIPS
- macro_assembler.reset(new RegExpMacroAssemblerMIPS(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_MIPS64
- macro_assembler.reset(new RegExpMacroAssemblerMIPS(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#else
-#error "Unsupported architecture"
-#endif
- } else {
- DCHECK(FLAG_regexp_interpret_all);
-
- // Interpreted regexp implementation.
- macro_assembler.reset(new RegExpMacroAssemblerIrregexp(isolate, zone));
- }
-
- macro_assembler->set_slow_safe(TooMuchRegExpCode(isolate, pattern));
-
- // Inserted here, instead of in Assembler, because it depends on information
- // in the AST that isn't replicated in the Node structure.
- static const int kMaxBacksearchLimit = 1024;
- if (is_end_anchored && !is_start_anchored && !is_sticky &&
- max_length < kMaxBacksearchLimit) {
- macro_assembler->SetCurrentPositionFromEnd(max_length);
- }
-
- if (is_global) {
- RegExpMacroAssembler::GlobalMode mode = RegExpMacroAssembler::GLOBAL;
- if (data->tree->min_match() > 0) {
- mode = RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK;
- } else if (is_unicode) {
- mode = RegExpMacroAssembler::GLOBAL_UNICODE;
- }
- macro_assembler->set_global_mode(mode);
- }
-
- return compiler.Assemble(isolate, macro_assembler.get(), node,
- data->capture_count, pattern);
-}
-
-bool RegExpEngine::TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern) {
- Heap* heap = isolate->heap();
- bool too_much = pattern->length() > RegExpImpl::kRegExpTooLargeToOptimize;
- if (isolate->total_regexp_code_generated() >
- RegExpImpl::kRegExpCompiledLimit &&
- heap->CommittedMemoryExecutable() >
- RegExpImpl::kRegExpExecutableMemoryLimit) {
- too_much = true;
- }
- return too_much;
-}
-
-Object RegExpResultsCache::Lookup(Heap* heap, String key_string,
- Object key_pattern,
- FixedArray* last_match_cache,
- ResultsCacheType type) {
- FixedArray cache;
- if (!key_string.IsInternalizedString()) return Smi::kZero;
- if (type == STRING_SPLIT_SUBSTRINGS) {
- DCHECK(key_pattern.IsString());
- if (!key_pattern.IsInternalizedString()) return Smi::kZero;
- cache = heap->string_split_cache();
- } else {
- DCHECK(type == REGEXP_MULTIPLE_INDICES);
- DCHECK(key_pattern.IsFixedArray());
- cache = heap->regexp_multiple_cache();
- }
-
- uint32_t hash = key_string.Hash();
- uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
- ~(kArrayEntriesPerCacheEntry - 1));
- if (cache.get(index + kStringOffset) != key_string ||
- cache.get(index + kPatternOffset) != key_pattern) {
- index =
- ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache.get(index + kStringOffset) != key_string ||
- cache.get(index + kPatternOffset) != key_pattern) {
- return Smi::kZero;
- }
- }
-
- *last_match_cache = FixedArray::cast(cache.get(index + kLastMatchOffset));
- return cache.get(index + kArrayOffset);
-}
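-// Note on the probe sequence above (informal): each logical cache entry
-// occupies kArrayEntriesPerCacheEntry (4) consecutive slots, so the primary
-// index is the hash masked to the cache size and rounded down to a multiple
-// of 4; on a mismatch exactly one secondary slot, 4 entries further on
-// (wrapping around), is tried before the lookup gives up.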
-
-void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
- Handle<Object> key_pattern,
- Handle<FixedArray> value_array,
- Handle<FixedArray> last_match_cache,
- ResultsCacheType type) {
- Factory* factory = isolate->factory();
- Handle<FixedArray> cache;
- if (!key_string->IsInternalizedString()) return;
- if (type == STRING_SPLIT_SUBSTRINGS) {
- DCHECK(key_pattern->IsString());
- if (!key_pattern->IsInternalizedString()) return;
- cache = factory->string_split_cache();
- } else {
- DCHECK(type == REGEXP_MULTIPLE_INDICES);
- DCHECK(key_pattern->IsFixedArray());
- cache = factory->regexp_multiple_cache();
- }
-
- uint32_t hash = key_string->Hash();
- uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
- ~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == Smi::kZero) {
- cache->set(index + kStringOffset, *key_string);
- cache->set(index + kPatternOffset, *key_pattern);
- cache->set(index + kArrayOffset, *value_array);
- cache->set(index + kLastMatchOffset, *last_match_cache);
- } else {
- uint32_t index2 =
- ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache->get(index2 + kStringOffset) == Smi::kZero) {
- cache->set(index2 + kStringOffset, *key_string);
- cache->set(index2 + kPatternOffset, *key_pattern);
- cache->set(index2 + kArrayOffset, *value_array);
- cache->set(index2 + kLastMatchOffset, *last_match_cache);
- } else {
- cache->set(index2 + kStringOffset, Smi::kZero);
- cache->set(index2 + kPatternOffset, Smi::kZero);
- cache->set(index2 + kArrayOffset, Smi::kZero);
- cache->set(index2 + kLastMatchOffset, Smi::kZero);
- cache->set(index + kStringOffset, *key_string);
- cache->set(index + kPatternOffset, *key_pattern);
- cache->set(index + kArrayOffset, *value_array);
- cache->set(index + kLastMatchOffset, *last_match_cache);
- }
- }
- // If the array is a reasonably short list of substrings, convert it into a
- // list of internalized strings.
- if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
- for (int i = 0; i < value_array->length(); i++) {
- Handle<String> str(String::cast(value_array->get(i)), isolate);
- Handle<String> internalized_str = factory->InternalizeString(str);
- value_array->set(i, *internalized_str);
- }
- }
- // Convert backing store to a copy-on-write array.
- value_array->set_map_no_write_barrier(
- ReadOnlyRoots(isolate).fixed_cow_array_map());
-}
-
-void RegExpResultsCache::Clear(FixedArray cache) {
- for (int i = 0; i < kRegExpResultsCacheSize; i++) {
- cache.set(i, Smi::kZero);
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
deleted file mode 100644
index 832c7e3aa5..0000000000
--- a/deps/v8/src/regexp/jsregexp.h
+++ /dev/null
@@ -1,1548 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_REGEXP_JSREGEXP_H_
-#define V8_REGEXP_JSREGEXP_H_
-
-#include "src/execution/isolate.h"
-#include "src/objects/js-regexp.h"
-#include "src/regexp/regexp-ast.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/utils/allocation.h"
-#include "src/zone/zone-splay-tree.h"
-
-namespace v8 {
-namespace internal {
-
-class NodeVisitor;
-class RegExpCompiler;
-class RegExpMacroAssembler;
-class RegExpNode;
-class RegExpTree;
-class BoyerMooreLookahead;
-
-inline bool IgnoreCase(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kIgnoreCase) != 0;
-}
-
-inline bool IsUnicode(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kUnicode) != 0;
-}
-
-inline bool IsSticky(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kSticky) != 0;
-}
-
-inline bool IsGlobal(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kGlobal) != 0;
-}
-
-inline bool DotAll(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kDotAll) != 0;
-}
-
-inline bool Multiline(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kMultiline) != 0;
-}
-
-inline bool NeedsUnicodeCaseEquivalents(JSRegExp::Flags flags) {
- // Both unicode and ignore_case flags are set. We need to use ICU to find
- // the closure over case equivalents.
- return IsUnicode(flags) && IgnoreCase(flags);
-}
-
-class RegExpImpl {
- public:
- // Whether the irregexp engine generates native code or interpreter bytecode.
- static bool UsesNativeRegExp() { return !FLAG_regexp_interpret_all; }
-
- // Returns a string representation of a regular expression.
- // Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
- // This function calls the garbage collector if necessary.
- static Handle<String> ToString(Handle<Object> value);
-
- // Parses the RegExp pattern and prepares the JSRegExp object with
- // generic data and choice of implementation - as well as what
- // the implementation wants to store in the data field.
- // Returns false if compilation fails.
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Compile(
- Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
- JSRegExp::Flags flags);
-
- // See ECMA-262 section 15.10.6.2.
- // This function calls the garbage collector if necessary.
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Exec(
- Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int index, Handle<RegExpMatchInfo> last_match_info);
-
- // Prepares a JSRegExp object with Irregexp-specific data.
- static void IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
- int capture_register_count);
-
- static void AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
- Handle<String> match_pattern);
-
- static int AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index, int32_t* output,
- int output_size);
-
- static Handle<Object> AtomExec(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info);
-
- enum IrregexpResult { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };
-
- // Prepare a RegExp for being executed one or more times (using
- // IrregexpExecOnce) on the subject.
- // This ensures that the regexp is compiled for the subject, and that
- // the subject is flat.
- // Returns the number of integer spaces required by IrregexpExecOnce
- // as its "registers" argument. If the regexp cannot be compiled,
- // an exception is set as pending, and this function returns a negative value.
- static int IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject);
-
- // Execute a regular expression on the subject, starting from index.
- // If matching succeeds, return the number of matches. This can be larger
- // than one in the case of global regular expressions.
- // The captures and subcaptures are stored into the registers vector.
- // If matching fails, returns RE_FAILURE.
- // If execution fails, sets a pending exception and returns RE_EXCEPTION.
- static int IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index, int32_t* output,
- int output_size);
-
- // Execute an Irregexp bytecode pattern.
- // On a successful match, the result is a JSArray containing
- // captured positions. On a failure, the result is the null value.
- // Returns an empty handle in case of an exception.
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> IrregexpExec(
- Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int index, Handle<RegExpMatchInfo> last_match_info);
-
- // Set last match info. If match is nullptr, then setting captures is
- // omitted.
- static Handle<RegExpMatchInfo> SetLastMatchInfo(
- Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
- Handle<String> subject, int capture_count, int32_t* match);
-
- class GlobalCache {
- public:
- GlobalCache(Handle<JSRegExp> regexp,
- Handle<String> subject,
- Isolate* isolate);
-
- V8_INLINE ~GlobalCache();
-
- // Fetch the next entry in the cache for global regexp match results.
- // This does not set the last match info. Upon failure, nullptr is
- // returned. The cause can be checked with Result(). The previous result is
- // still available in memory when a failure happens.
- V8_INLINE int32_t* FetchNext();
-
- V8_INLINE int32_t* LastSuccessfulMatch();
-
- V8_INLINE bool HasException() { return num_matches_ < 0; }
-
- private:
- int AdvanceZeroLength(int last_index);
-
- int num_matches_;
- int max_matches_;
- int current_match_index_;
- int registers_per_match_;
- // Pointer to the last set of captures.
- int32_t* register_array_;
- int register_array_size_;
- Handle<JSRegExp> regexp_;
- Handle<String> subject_;
- Isolate* isolate_;
- };
-
- // For acting on the JSRegExp data FixedArray.
- static int IrregexpMaxRegisterCount(FixedArray re);
- static void SetIrregexpMaxRegisterCount(FixedArray re, int value);
- static void SetIrregexpCaptureNameMap(FixedArray re,
- Handle<FixedArray> value);
- static int IrregexpNumberOfCaptures(FixedArray re);
- static int IrregexpNumberOfRegisters(FixedArray re);
- static ByteArray IrregexpByteCode(FixedArray re, bool is_one_byte);
- static Code IrregexpNativeCode(FixedArray re, bool is_one_byte);
-
- // Limit the space regexps take up on the heap. In order to limit this we
- // would like to keep track of the amount of regexp code on the heap. This
- // is not tracked, however. As a conservative approximation we track the
- // total regexp code compiled including code that has subsequently been freed
- // and the total executable memory at any point.
- static const size_t kRegExpExecutableMemoryLimit = 16 * MB;
- static const size_t kRegExpCompiledLimit = 1 * MB;
- static const int kRegExpTooLargeToOptimize = 20 * KB;
-
- private:
- static bool CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> sample_subject, bool is_one_byte);
- static inline bool EnsureCompiledIrregexp(Isolate* isolate,
- Handle<JSRegExp> re,
- Handle<String> sample_subject,
- bool is_one_byte);
-};
-
-
-// Represents the location of one element relative to the intersection of
-// two sets. Corresponds to the four areas of a Venn diagram.
-enum ElementInSetsRelation {
- kInsideNone = 0,
- kInsideFirst = 1,
- kInsideSecond = 2,
- kInsideBoth = 3
-};
-
-
-// A set of unsigned integers that behaves especially well on small
-// integers (< 32). May do zone-allocation.
-class OutSet: public ZoneObject {
- public:
- OutSet() : first_(0), remaining_(nullptr), successors_(nullptr) {}
- OutSet* Extend(unsigned value, Zone* zone);
- V8_EXPORT_PRIVATE bool Get(unsigned value) const;
- static const unsigned kFirstLimit = 32;
-
- private:
- // Destructively set a value in this set. In most cases you want
- // to use Extend instead to ensure that only one instance exists
- // that contains the same values.
- void Set(unsigned value, Zone* zone);
-
- // The successors are a list of sets that contain the same values
- // as this set, plus one additional value that is not present in
- // this set.
- ZoneList<OutSet*>* successors(Zone* zone) { return successors_; }
-
- OutSet(uint32_t first, ZoneList<unsigned>* remaining)
- : first_(first), remaining_(remaining), successors_(nullptr) {}
- uint32_t first_;
- ZoneList<unsigned>* remaining_;
- ZoneList<OutSet*>* successors_;
- friend class Trace;
-};
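-// Usage sketch for OutSet (illustrative only): the set is persistent, so
-// extending it yields a (possibly cached) new set instead of mutating the
-// receiver, e.g.
-//   OutSet* a = new (zone) OutSet();
-//   OutSet* b = a->Extend(3, zone);  // 'a' stays empty, 'b' contains {3}
-//   bool has = b->Get(3);            // true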
-
-
-// A mapping from integers, specified as ranges, to a set of integers.
-// Used for mapping character ranges to choices.
-class DispatchTable : public ZoneObject {
- public:
- explicit DispatchTable(Zone* zone) : tree_(zone) { }
-
- class Entry {
- public:
- Entry() : from_(0), to_(0), out_set_(nullptr) {}
- Entry(uc32 from, uc32 to, OutSet* out_set)
- : from_(from), to_(to), out_set_(out_set) {
- DCHECK(from <= to);
- }
- uc32 from() { return from_; }
- uc32 to() { return to_; }
- void set_to(uc32 value) { to_ = value; }
- void AddValue(int value, Zone* zone) {
- out_set_ = out_set_->Extend(value, zone);
- }
- OutSet* out_set() { return out_set_; }
- private:
- uc32 from_;
- uc32 to_;
- OutSet* out_set_;
- };
-
- class Config {
- public:
- using Key = uc32;
- using Value = Entry;
- static const uc32 kNoKey;
- static const Entry NoValue() { return Value(); }
- static inline int Compare(uc32 a, uc32 b) {
- if (a == b)
- return 0;
- else if (a < b)
- return -1;
- else
- return 1;
- }
- };
-
- V8_EXPORT_PRIVATE void AddRange(CharacterRange range, int value, Zone* zone);
- V8_EXPORT_PRIVATE OutSet* Get(uc32 value);
- void Dump();
-
- template <typename Callback>
- void ForEach(Callback* callback) {
- return tree()->ForEach(callback);
- }
-
- private:
- // There can't be a static empty set since it allocates its
- // successors in a zone and caches them.
- OutSet* empty() { return &empty_; }
- OutSet empty_;
- ZoneSplayTree<Config>* tree() { return &tree_; }
- ZoneSplayTree<Config> tree_;
-};
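-// Usage sketch for DispatchTable (illustrative only): a choice node whose
-// alternative 0 starts with [a-f] and whose alternative 1 starts with [d-z]
-// would add those ranges with values 0 and 1; Get('e') then returns an
-// OutSet containing {0, 1}, i.e. both alternatives can begin with 'e'.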
-
-
-// Categorizes character ranges into BMP, non-BMP, lead, and trail surrogates.
-class UnicodeRangeSplitter {
- public:
- V8_EXPORT_PRIVATE UnicodeRangeSplitter(Zone* zone,
- ZoneList<CharacterRange>* base);
- void Call(uc32 from, DispatchTable::Entry entry);
-
- ZoneList<CharacterRange>* bmp() { return bmp_; }
- ZoneList<CharacterRange>* lead_surrogates() { return lead_surrogates_; }
- ZoneList<CharacterRange>* trail_surrogates() { return trail_surrogates_; }
- ZoneList<CharacterRange>* non_bmp() const { return non_bmp_; }
-
- private:
- static const int kBase = 0;
- // Separate ranges into the following categories:
- static const int kBmpCodePoints = 1;
- static const int kLeadSurrogates = 2;
- static const int kTrailSurrogates = 3;
- static const int kNonBmpCodePoints = 4;
-
- Zone* zone_;
- DispatchTable table_;
- ZoneList<CharacterRange>* bmp_;
- ZoneList<CharacterRange>* lead_surrogates_;
- ZoneList<CharacterRange>* trail_surrogates_;
- ZoneList<CharacterRange>* non_bmp_;
-};
-
-#define FOR_EACH_NODE_TYPE(VISIT) \
- VISIT(End) \
- VISIT(Action) \
- VISIT(Choice) \
- VISIT(BackReference) \
- VISIT(Assertion) \
- VISIT(Text)
-
-
-class Trace;
-struct PreloadState;
-class GreedyLoopState;
-class AlternativeGenerationList;
-
-struct NodeInfo {
- NodeInfo()
- : being_analyzed(false),
- been_analyzed(false),
- follows_word_interest(false),
- follows_newline_interest(false),
- follows_start_interest(false),
- at_end(false),
- visited(false),
- replacement_calculated(false) { }
-
- // Returns true if the interests and assumptions of this node
- // match those of the given one.
- bool Matches(NodeInfo* that) {
- return (at_end == that->at_end) &&
- (follows_word_interest == that->follows_word_interest) &&
- (follows_newline_interest == that->follows_newline_interest) &&
- (follows_start_interest == that->follows_start_interest);
- }
-
- // Updates the interests of this node given the interests of the
- // node preceding it.
- void AddFromPreceding(NodeInfo* that) {
- at_end |= that->at_end;
- follows_word_interest |= that->follows_word_interest;
- follows_newline_interest |= that->follows_newline_interest;
- follows_start_interest |= that->follows_start_interest;
- }
-
- bool HasLookbehind() {
- return follows_word_interest ||
- follows_newline_interest ||
- follows_start_interest;
- }
-
- // Sets the interests of this node to include the interests of the
- // following node.
- void AddFromFollowing(NodeInfo* that) {
- follows_word_interest |= that->follows_word_interest;
- follows_newline_interest |= that->follows_newline_interest;
- follows_start_interest |= that->follows_start_interest;
- }
-
- void ResetCompilationState() {
- being_analyzed = false;
- been_analyzed = false;
- }
-
- bool being_analyzed: 1;
- bool been_analyzed: 1;
-
- // These bits are set if this node has to know what the preceding
- // character was.
- bool follows_word_interest: 1;
- bool follows_newline_interest: 1;
- bool follows_start_interest: 1;
-
- bool at_end: 1;
- bool visited: 1;
- bool replacement_calculated: 1;
-};
-
-
-// Details of a quick mask-compare check that can look ahead in the
-// input stream.
-class QuickCheckDetails {
- public:
- QuickCheckDetails()
- : characters_(0),
- mask_(0),
- value_(0),
- cannot_match_(false) { }
- explicit QuickCheckDetails(int characters)
- : characters_(characters),
- mask_(0),
- value_(0),
- cannot_match_(false) { }
- bool Rationalize(bool one_byte);
- // Merge in the information from another branch of an alternation.
- void Merge(QuickCheckDetails* other, int from_index);
- // Advance the current position by some amount.
- void Advance(int by, bool one_byte);
- void Clear();
- bool cannot_match() { return cannot_match_; }
- void set_cannot_match() { cannot_match_ = true; }
- struct Position {
- Position() : mask(0), value(0), determines_perfectly(false) { }
- uc16 mask;
- uc16 value;
- bool determines_perfectly;
- };
- int characters() { return characters_; }
- void set_characters(int characters) { characters_ = characters; }
- Position* positions(int index) {
- DCHECK_LE(0, index);
- DCHECK_GT(characters_, index);
- return positions_ + index;
- }
- uint32_t mask() { return mask_; }
- uint32_t value() { return value_; }
-
- private:
- // How many characters we have quick check information for. This is
- // the same for all branches of a choice node.
- int characters_;
- Position positions_[4];
- // These values are the condensate of the above array after Rationalize().
- uint32_t mask_;
- uint32_t value_;
- // If set to true, there is no way this quick check can match at all.
- // E.g., if it requires being at the start of the input but isn't.
- bool cannot_match_;
-};
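-// Worked example (informal): suppose the next two one-byte characters must be
-// exactly "ab". A quick check over 2 characters can then use an all-ones mask
-// and a value holding 'a' and 'b' in the loaded order; if
-// (loaded_chars & mask) != value, the node certainly cannot match at this
-// position, while equality only means it may match and the regular code path
-// still has to confirm it.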
-
-
-extern int kUninitializedRegExpNodePlaceHolder;
-
-
-class RegExpNode: public ZoneObject {
- public:
- explicit RegExpNode(Zone* zone)
- : replacement_(nullptr),
- on_work_list_(false),
- trace_count_(0),
- zone_(zone) {
- bm_info_[0] = bm_info_[1] = nullptr;
- }
- virtual ~RegExpNode();
- virtual void Accept(NodeVisitor* visitor) = 0;
- // Generates a goto to this node or actually generates the code at this point.
- virtual void Emit(RegExpCompiler* compiler, Trace* trace) = 0;
- // How many characters must this node consume at a minimum in order to
- // succeed. If we have found at least 'still_to_find' characters that
- // must be consumed there is no need to ask any following nodes whether
- // they are sure to eat any more characters. The not_at_start argument is
- // used to indicate that we know we are not at the start of the input. In
- // this case anchored branches will always fail and can be ignored when
- // determining how many characters are consumed on success.
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start) = 0;
- // Emits some quick code that checks whether the preloaded characters match.
- // Falls through on certain failure, jumps to the label on possible success.
- // If the node cannot make a quick check it does nothing and returns false.
- bool EmitQuickCheck(RegExpCompiler* compiler,
- Trace* bounds_check_trace,
- Trace* trace,
- bool preload_has_checked_bounds,
- Label* on_possible_success,
- QuickCheckDetails* details_return,
- bool fall_through_on_failure);
- // For a given number of characters this returns a mask and a value. The
- // next n characters are anded with the mask and compared with the value.
- // A comparison failure indicates the node cannot match the next n characters.
- // A comparison success indicates the node may match.
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) = 0;
- static const int kNodeIsTooComplexForGreedyLoops = kMinInt;
- virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
- // Only returns the successor for a text node of length 1 that matches any
- // character and that has no guards on it.
- virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
- RegExpCompiler* compiler) {
- return nullptr;
- }
-
- // Collects information on the possible code units (mod 128) that can match if
- // we look forward. This is used for a Boyer-Moore-like string searching
- // implementation. TODO(erikcorry): This should share more code with
- // EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
- // the number of nodes we are willing to look at in order to create this data.
- static const int kRecursionBudget = 200;
- bool KeepRecursing(RegExpCompiler* compiler);
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- UNREACHABLE();
- }
-
- // If we know that the input is one-byte then there are some nodes that can
- // never match. This method returns a node that can be substituted for
- // itself, or nullptr if the node can never match.
- virtual RegExpNode* FilterOneByte(int depth) { return this; }
- // Helper for FilterOneByte.
- RegExpNode* replacement() {
- DCHECK(info()->replacement_calculated);
- return replacement_;
- }
- RegExpNode* set_replacement(RegExpNode* replacement) {
- info()->replacement_calculated = true;
- replacement_ = replacement;
- return replacement; // For convenience.
- }
-
- // We want to avoid recalculating the lookahead info, so we store it on the
- // node. Only info that is for this node is stored. We can tell that the
- // info is for this node when offset == 0, so the information is calculated
- // relative to this node.
- void SaveBMInfo(BoyerMooreLookahead* bm, bool not_at_start, int offset) {
- if (offset == 0) set_bm_info(not_at_start, bm);
- }
-
- Label* label() { return &label_; }
- // If non-generic code is generated for a node (i.e. the node is not at the
- // start of the trace) then it cannot be reused. This variable sets a limit
- // on how often we allow that to happen before we insist on starting a new
- // trace and generating generic code for a node that can be reused by flushing
- // the deferred actions in the current trace and generating a goto.
- static const int kMaxCopiesCodeGenerated = 10;
-
- bool on_work_list() { return on_work_list_; }
- void set_on_work_list(bool value) { on_work_list_ = value; }
-
- NodeInfo* info() { return &info_; }
-
- BoyerMooreLookahead* bm_info(bool not_at_start) {
- return bm_info_[not_at_start ? 1 : 0];
- }
-
- Zone* zone() const { return zone_; }
-
- protected:
- enum LimitResult { DONE, CONTINUE };
- RegExpNode* replacement_;
-
- LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
-
- void set_bm_info(bool not_at_start, BoyerMooreLookahead* bm) {
- bm_info_[not_at_start ? 1 : 0] = bm;
- }
-
- private:
- static const int kFirstCharBudget = 10;
- Label label_;
- bool on_work_list_;
- NodeInfo info_;
- // This variable keeps track of how many times code has been generated for
- // this node (in different traces). We don't keep track of where the
- // generated code is located unless the code is generated at the start of
- // a trace, in which case it is generic and can be reused by flushing the
- // deferred operations in the current trace and generating a goto.
- int trace_count_;
- BoyerMooreLookahead* bm_info_[2];
-
- Zone* zone_;
-};
-
-
-class SeqRegExpNode: public RegExpNode {
- public:
- explicit SeqRegExpNode(RegExpNode* on_success)
- : RegExpNode(on_success->zone()), on_success_(on_success) { }
- RegExpNode* on_success() { return on_success_; }
- void set_on_success(RegExpNode* node) { on_success_ = node; }
- RegExpNode* FilterOneByte(int depth) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override {
- on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
- if (offset == 0) set_bm_info(not_at_start, bm);
- }
-
- protected:
- RegExpNode* FilterSuccessor(int depth);
-
- private:
- RegExpNode* on_success_;
-};
-
-
-class ActionNode: public SeqRegExpNode {
- public:
- enum ActionType {
- SET_REGISTER,
- INCREMENT_REGISTER,
- STORE_POSITION,
- BEGIN_SUBMATCH,
- POSITIVE_SUBMATCH_SUCCESS,
- EMPTY_MATCH_CHECK,
- CLEAR_CAPTURES
- };
- static ActionNode* SetRegister(int reg, int val, RegExpNode* on_success);
- static ActionNode* IncrementRegister(int reg, RegExpNode* on_success);
- static ActionNode* StorePosition(int reg,
- bool is_capture,
- RegExpNode* on_success);
- static ActionNode* ClearCaptures(Interval range, RegExpNode* on_success);
- static ActionNode* BeginSubmatch(int stack_pointer_reg,
- int position_reg,
- RegExpNode* on_success);
- static ActionNode* PositiveSubmatchSuccess(int stack_pointer_reg,
- int restore_reg,
- int clear_capture_count,
- int clear_capture_from,
- RegExpNode* on_success);
- static ActionNode* EmptyMatchCheck(int start_register,
- int repetition_register,
- int repetition_limit,
- RegExpNode* on_success);
- void Accept(NodeVisitor* visitor) override;
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int filled_in,
- bool not_at_start) override {
- return on_success()->GetQuickCheckDetails(
- details, compiler, filled_in, not_at_start);
- }
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
- ActionType action_type() { return action_type_; }
- // TODO(erikcorry): We should allow some action nodes in greedy loops.
- int GreedyLoopTextLength() override {
- return kNodeIsTooComplexForGreedyLoops;
- }
-
- private:
- union {
- struct {
- int reg;
- int value;
- } u_store_register;
- struct {
- int reg;
- } u_increment_register;
- struct {
- int reg;
- bool is_capture;
- } u_position_register;
- struct {
- int stack_pointer_register;
- int current_position_register;
- int clear_register_count;
- int clear_register_from;
- } u_submatch;
- struct {
- int start_register;
- int repetition_register;
- int repetition_limit;
- } u_empty_match_check;
- struct {
- int range_from;
- int range_to;
- } u_clear_captures;
- } data_;
- ActionNode(ActionType action_type, RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- action_type_(action_type) { }
- ActionType action_type_;
- friend class DotPrinter;
-};
-
-
-class TextNode: public SeqRegExpNode {
- public:
- TextNode(ZoneList<TextElement>* elms, bool read_backward,
- RegExpNode* on_success)
- : SeqRegExpNode(on_success), elms_(elms), read_backward_(read_backward) {}
- TextNode(RegExpCharacterClass* that, bool read_backward,
- RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- elms_(new (zone()) ZoneList<TextElement>(1, zone())),
- read_backward_(read_backward) {
- elms_->Add(TextElement::CharClass(that), zone());
- }
- // Create TextNode for a single character class for the given ranges.
- static TextNode* CreateForCharacterRanges(Zone* zone,
- ZoneList<CharacterRange>* ranges,
- bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags);
- // Create TextNode for a surrogate pair with a range given for the
- // lead and the trail surrogate each.
- static TextNode* CreateForSurrogatePair(Zone* zone, CharacterRange lead,
- CharacterRange trail,
- bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags);
- void Accept(NodeVisitor* visitor) override;
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override;
- ZoneList<TextElement>* elements() { return elms_; }
- bool read_backward() { return read_backward_; }
- void MakeCaseIndependent(Isolate* isolate, bool is_one_byte);
- int GreedyLoopTextLength() override;
- RegExpNode* GetSuccessorOfOmnivorousTextNode(
- RegExpCompiler* compiler) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
- void CalculateOffsets();
- RegExpNode* FilterOneByte(int depth) override;
-
- private:
- enum TextEmitPassType {
- NON_LATIN1_MATCH, // Check for characters that can't match.
- SIMPLE_CHARACTER_MATCH, // Case-dependent single character check.
- NON_LETTER_CHARACTER_MATCH, // Check characters that have no case equivs.
- CASE_CHARACTER_MATCH, // Case-independent single character check.
- CHARACTER_CLASS_MATCH // Character class.
- };
- static bool SkipPass(TextEmitPassType pass, bool ignore_case);
- static const int kFirstRealPass = SIMPLE_CHARACTER_MATCH;
- static const int kLastPass = CHARACTER_CLASS_MATCH;
- void TextEmitPass(RegExpCompiler* compiler,
- TextEmitPassType pass,
- bool preloaded,
- Trace* trace,
- bool first_element_checked,
- int* checked_up_to);
- int Length();
- ZoneList<TextElement>* elms_;
- bool read_backward_;
-};
-
-
-class AssertionNode: public SeqRegExpNode {
- public:
- enum AssertionType {
- AT_END,
- AT_START,
- AT_BOUNDARY,
- AT_NON_BOUNDARY,
- AFTER_NEWLINE
- };
- static AssertionNode* AtEnd(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_END, on_success);
- }
- static AssertionNode* AtStart(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_START, on_success);
- }
- static AssertionNode* AtBoundary(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_BOUNDARY, on_success);
- }
- static AssertionNode* AtNonBoundary(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_NON_BOUNDARY, on_success);
- }
- static AssertionNode* AfterNewline(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AFTER_NEWLINE, on_success);
- }
- void Accept(NodeVisitor* visitor) override;
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int filled_in,
- bool not_at_start) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
- AssertionType assertion_type() { return assertion_type_; }
-
- private:
- void EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace);
- enum IfPrevious { kIsNonWord, kIsWord };
- void BacktrackIfPrevious(RegExpCompiler* compiler,
- Trace* trace,
- IfPrevious backtrack_if_previous);
- AssertionNode(AssertionType t, RegExpNode* on_success)
- : SeqRegExpNode(on_success), assertion_type_(t) { }
- AssertionType assertion_type_;
-};
-
-
-class BackReferenceNode: public SeqRegExpNode {
- public:
- BackReferenceNode(int start_reg, int end_reg, JSRegExp::Flags flags,
- bool read_backward, RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- start_reg_(start_reg),
- end_reg_(end_reg),
- flags_(flags),
- read_backward_(read_backward) {}
- void Accept(NodeVisitor* visitor) override;
- int start_register() { return start_reg_; }
- int end_register() { return end_reg_; }
- bool read_backward() { return read_backward_; }
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int recursion_depth,
- bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override {
- return;
- }
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
-
- private:
- int start_reg_;
- int end_reg_;
- JSRegExp::Flags flags_;
- bool read_backward_;
-};
-
-
-class EndNode: public RegExpNode {
- public:
- enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
- EndNode(Action action, Zone* zone) : RegExpNode(zone), action_(action) {}
- void Accept(NodeVisitor* visitor) override;
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int recursion_depth,
- bool not_at_start) override {
- return 0;
- }
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override {
- // Returning 0 from EatsAtLeast should ensure we never get here.
- UNREACHABLE();
- }
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override {
- // Returning 0 from EatsAtLeast should ensure we never get here.
- UNREACHABLE();
- }
-
- private:
- Action action_;
-};
-
-
-class NegativeSubmatchSuccess: public EndNode {
- public:
- NegativeSubmatchSuccess(int stack_pointer_reg,
- int position_reg,
- int clear_capture_count,
- int clear_capture_start,
- Zone* zone)
- : EndNode(NEGATIVE_SUBMATCH_SUCCESS, zone),
- stack_pointer_register_(stack_pointer_reg),
- current_position_register_(position_reg),
- clear_capture_count_(clear_capture_count),
- clear_capture_start_(clear_capture_start) { }
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
-
- private:
- int stack_pointer_register_;
- int current_position_register_;
- int clear_capture_count_;
- int clear_capture_start_;
-};
-
-
-class Guard: public ZoneObject {
- public:
- enum Relation { LT, GEQ };
- Guard(int reg, Relation op, int value)
- : reg_(reg),
- op_(op),
- value_(value) { }
- int reg() { return reg_; }
- Relation op() { return op_; }
- int value() { return value_; }
-
- private:
- int reg_;
- Relation op_;
- int value_;
-};
-
-
-class GuardedAlternative {
- public:
- explicit GuardedAlternative(RegExpNode* node)
- : node_(node), guards_(nullptr) {}
- void AddGuard(Guard* guard, Zone* zone);
- RegExpNode* node() { return node_; }
- void set_node(RegExpNode* node) { node_ = node; }
- ZoneList<Guard*>* guards() { return guards_; }
-
- private:
- RegExpNode* node_;
- ZoneList<Guard*>* guards_;
-};
-
-
-class AlternativeGeneration;
-
-
-class ChoiceNode: public RegExpNode {
- public:
- explicit ChoiceNode(int expected_size, Zone* zone)
- : RegExpNode(zone),
- alternatives_(new (zone)
- ZoneList<GuardedAlternative>(expected_size, zone)),
- table_(nullptr),
- not_at_start_(false),
- being_calculated_(false) {}
- void Accept(NodeVisitor* visitor) override;
- void AddAlternative(GuardedAlternative node) {
- alternatives()->Add(node, zone());
- }
- ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
- DispatchTable* GetTable(bool ignore_case);
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- int EatsAtLeastHelper(int still_to_find,
- int budget,
- RegExpNode* ignore_this_node,
- bool not_at_start);
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
-
- bool being_calculated() { return being_calculated_; }
- bool not_at_start() { return not_at_start_; }
- void set_not_at_start() { not_at_start_ = true; }
- void set_being_calculated(bool b) { being_calculated_ = b; }
- virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
- return true;
- }
- RegExpNode* FilterOneByte(int depth) override;
- virtual bool read_backward() { return false; }
-
- protected:
- int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
- ZoneList<GuardedAlternative>* alternatives_;
-
- private:
- friend class DispatchTableConstructor;
- friend class Analysis;
- void GenerateGuard(RegExpMacroAssembler* macro_assembler,
- Guard* guard,
- Trace* trace);
- int CalculatePreloadCharacters(RegExpCompiler* compiler, int eats_at_least);
- void EmitOutOfLineContinuation(RegExpCompiler* compiler,
- Trace* trace,
- GuardedAlternative alternative,
- AlternativeGeneration* alt_gen,
- int preload_characters,
- bool next_expects_preload);
- void SetUpPreLoad(RegExpCompiler* compiler,
- Trace* current_trace,
- PreloadState* preloads);
- void AssertGuardsMentionRegisters(Trace* trace);
- int EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler, Trace* trace);
- Trace* EmitGreedyLoop(RegExpCompiler* compiler,
- Trace* trace,
- AlternativeGenerationList* alt_gens,
- PreloadState* preloads,
- GreedyLoopState* greedy_loop_state,
- int text_length);
- void EmitChoices(RegExpCompiler* compiler,
- AlternativeGenerationList* alt_gens,
- int first_choice,
- Trace* trace,
- PreloadState* preloads);
- DispatchTable* table_;
- // If true, this node is never checked at the start of the input.
- // Allows a new trace to start with at_start() set to false.
- bool not_at_start_;
- bool being_calculated_;
-};
-
-
-class NegativeLookaroundChoiceNode : public ChoiceNode {
- public:
- explicit NegativeLookaroundChoiceNode(GuardedAlternative this_must_fail,
- GuardedAlternative then_do_this,
- Zone* zone)
- : ChoiceNode(2, zone) {
- AddAlternative(this_must_fail);
- AddAlternative(then_do_this);
- }
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override {
- alternatives_->at(1).node()->FillInBMInfo(isolate, offset, budget - 1, bm,
- not_at_start);
- if (offset == 0) set_bm_info(not_at_start, bm);
- }
- // For a negative lookahead we don't emit the quick check for the
- // alternative that is expected to fail. This is because quick check code
- // starts by loading enough characters for the alternative that takes the fewest
- // characters, but on a negative lookahead the negative branch did not take
- // part in that calculation (EatsAtLeast) so the assumptions don't hold.
- bool try_to_emit_quick_check_for_alternative(bool is_first) override {
- return !is_first;
- }
- RegExpNode* FilterOneByte(int depth) override;
-};
-
-
-class LoopChoiceNode: public ChoiceNode {
- public:
- LoopChoiceNode(bool body_can_be_zero_length, bool read_backward, Zone* zone)
- : ChoiceNode(2, zone),
- loop_node_(nullptr),
- continue_node_(nullptr),
- body_can_be_zero_length_(body_can_be_zero_length),
- read_backward_(read_backward) {}
- void AddLoopAlternative(GuardedAlternative alt);
- void AddContinueAlternative(GuardedAlternative alt);
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
- RegExpNode* loop_node() { return loop_node_; }
- RegExpNode* continue_node() { return continue_node_; }
- bool body_can_be_zero_length() { return body_can_be_zero_length_; }
- bool read_backward() override { return read_backward_; }
- void Accept(NodeVisitor* visitor) override;
- RegExpNode* FilterOneByte(int depth) override;
-
- private:
- // AddAlternative is made private for loop nodes because alternatives
- // should not be added freely; we need to keep track of which node
- // goes back to the node itself.
- void AddAlternative(GuardedAlternative node) {
- ChoiceNode::AddAlternative(node);
- }
-
- RegExpNode* loop_node_;
- RegExpNode* continue_node_;
- bool body_can_be_zero_length_;
- bool read_backward_;
-};
-
-
-// Improve the speed at which we scan for an initial point where a non-anchored
-// regexp can match by using a Boyer-Moore-like table. This is done by
-// identifying non-greedy non-capturing loops in the nodes that eat any
-// character one at a time. For example in the middle of the regexp
-// /foo[\s\S]*?bar/ we find such a loop. There is also such a loop implicitly
-// inserted at the start of any non-anchored regexp.
-//
-// When we have found such a loop we look ahead in the nodes to find the set of
-// characters that can come at given distances. For example for the regexp
-// /.?foo/ we know that there are at least 3 characters ahead of us, and the
-// sets of characters that can occur are [any, [f, o], [o]]. We find a range in
-// the lookahead info where the set of characters is reasonably constrained. In
-// our example this is from index 1 to 2 (0 is not constrained). We can now
-// look 3 characters ahead and if we don't find one of [f, o] (the union of
-// [f, o] and [o]) then we can skip forwards by the range size (in this case 2).
-//
-// For Unicode input strings we do the same, but modulo 128.
-//
-// We also look at the first string fed to the regexp and use that to get a hint
-// of the character frequencies in the inputs. This affects the assessment of
-// whether the set of characters is 'reasonably constrained'.
-//
-// We also have another lookahead mechanism (called quick check in the code),
-// which uses a wide load of multiple characters followed by a mask and compare
-// to determine whether a match is possible at this point.
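-// Informal sketch of the skip loop that the /.?foo/ example above enables:
-//   while (enough input remains) {
-//     c = the character at the far end of the constrained range (index 2);
-//     if (c is 'f' or 'o') break;  // a match may start near this point
-//     advance the candidate start position by the range size (2 here);
-//   }
-// (The emitted code drives this with a boolean skip table; see GetSkipTable
-// below.)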
-enum ContainedInLattice {
- kNotYet = 0,
- kLatticeIn = 1,
- kLatticeOut = 2,
- kLatticeUnknown = 3 // Can also mean both in and out.
-};
-
-
-inline ContainedInLattice Combine(ContainedInLattice a, ContainedInLattice b) {
- return static_cast<ContainedInLattice>(a | b);
-}
-
-
-ContainedInLattice AddRange(ContainedInLattice a,
- const int* ranges,
- int ranges_size,
- Interval new_range);
-
-
-class BoyerMoorePositionInfo : public ZoneObject {
- public:
- explicit BoyerMoorePositionInfo(Zone* zone)
- : map_(new(zone) ZoneList<bool>(kMapSize, zone)),
- map_count_(0),
- w_(kNotYet),
- s_(kNotYet),
- d_(kNotYet),
- surrogate_(kNotYet) {
- for (int i = 0; i < kMapSize; i++) {
- map_->Add(false, zone);
- }
- }
-
- bool& at(int i) { return map_->at(i); }
-
- static const int kMapSize = 128;
- static const int kMask = kMapSize - 1;
-
- int map_count() const { return map_count_; }
-
- void Set(int character);
- void SetInterval(const Interval& interval);
- void SetAll();
- bool is_non_word() { return w_ == kLatticeOut; }
- bool is_word() { return w_ == kLatticeIn; }
-
- private:
- ZoneList<bool>* map_;
- int map_count_; // Number of set bits in the map.
- ContainedInLattice w_; // The \w character class.
- ContainedInLattice s_; // The \s character class.
- ContainedInLattice d_; // The \d character class.
- ContainedInLattice surrogate_; // Surrogate UTF-16 code units.
-};
-
-
-class BoyerMooreLookahead : public ZoneObject {
- public:
- BoyerMooreLookahead(int length, RegExpCompiler* compiler, Zone* zone);
-
- int length() { return length_; }
- int max_char() { return max_char_; }
- RegExpCompiler* compiler() { return compiler_; }
-
- int Count(int map_number) {
- return bitmaps_->at(map_number)->map_count();
- }
-
- BoyerMoorePositionInfo* at(int i) { return bitmaps_->at(i); }
-
- void Set(int map_number, int character) {
- if (character > max_char_) return;
- BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
- info->Set(character);
- }
-
- void SetInterval(int map_number, const Interval& interval) {
- if (interval.from() > max_char_) return;
- BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
- if (interval.to() > max_char_) {
- info->SetInterval(Interval(interval.from(), max_char_));
- } else {
- info->SetInterval(interval);
- }
- }
-
- void SetAll(int map_number) {
- bitmaps_->at(map_number)->SetAll();
- }
-
- void SetRest(int from_map) {
- for (int i = from_map; i < length_; i++) SetAll(i);
- }
- void EmitSkipInstructions(RegExpMacroAssembler* masm);
-
- private:
- // This is the value obtained by EatsAtLeast. If we do not have at least this
- // many characters left in the sample string then the match is bound to fail.
- // Therefore it is OK to read a character this far ahead of the current match
- // point.
- int length_;
- RegExpCompiler* compiler_;
- // 0xff for Latin1, 0xffff for UTF-16.
- int max_char_;
- ZoneList<BoyerMoorePositionInfo*>* bitmaps_;
-
- int GetSkipTable(int min_lookahead,
- int max_lookahead,
- Handle<ByteArray> boolean_skip_table);
- bool FindWorthwhileInterval(int* from, int* to);
- int FindBestInterval(
- int max_number_of_chars, int old_biggest_points, int* from, int* to);
-};
-
-
-// There are many ways to generate code for a node. This class encapsulates
-// the current way we should be generating. In other words it encapsulates
-// the current state of the code generator. The effect of this is that we
-// generate code for paths that the matcher can take through the regular
-// expression. A given node in the regexp can be code-generated several times
-// as it can be part of several traces. For example for the regexp:
-// /foo(bar|ip)baz/ the code to match baz will be generated twice, once as part
-// of the foo-bar-baz trace and once as part of the foo-ip-baz trace. The code
-// to match foo is generated only once (the traces have a common prefix). The
-// code to store the capture is deferred and generated (twice) after the places
-// where baz has been matched.
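-// As an informal illustration of the deferral mentioned above: while emitting
-// the foo-bar-baz trace, the position store for the (bar|ip) capture is
-// recorded on the Trace as a DeferredAction rather than emitted right away;
-// it only becomes real register writes when the trace is flushed, which is
-// why that store ends up being generated after the code that matches baz.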
-class Trace {
- public:
- // A value for a property that is either known to be true, known to be false,
- // or not known.
- enum TriBool {
- UNKNOWN = -1, FALSE_VALUE = 0, TRUE_VALUE = 1
- };
-
- class DeferredAction {
- public:
- DeferredAction(ActionNode::ActionType action_type, int reg)
- : action_type_(action_type), reg_(reg), next_(nullptr) {}
- DeferredAction* next() { return next_; }
- bool Mentions(int reg);
- int reg() { return reg_; }
- ActionNode::ActionType action_type() { return action_type_; }
- private:
- ActionNode::ActionType action_type_;
- int reg_;
- DeferredAction* next_;
- friend class Trace;
- };
-
- class DeferredCapture : public DeferredAction {
- public:
- DeferredCapture(int reg, bool is_capture, Trace* trace)
- : DeferredAction(ActionNode::STORE_POSITION, reg),
- cp_offset_(trace->cp_offset()),
- is_capture_(is_capture) { }
- int cp_offset() { return cp_offset_; }
- bool is_capture() { return is_capture_; }
- private:
- int cp_offset_;
- bool is_capture_;
- void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
- };
-
- class DeferredSetRegister : public DeferredAction {
- public:
- DeferredSetRegister(int reg, int value)
- : DeferredAction(ActionNode::SET_REGISTER, reg),
- value_(value) { }
- int value() { return value_; }
- private:
- int value_;
- };
-
- class DeferredClearCaptures : public DeferredAction {
- public:
- explicit DeferredClearCaptures(Interval range)
- : DeferredAction(ActionNode::CLEAR_CAPTURES, -1),
- range_(range) { }
- Interval range() { return range_; }
- private:
- Interval range_;
- };
-
- class DeferredIncrementRegister : public DeferredAction {
- public:
- explicit DeferredIncrementRegister(int reg)
- : DeferredAction(ActionNode::INCREMENT_REGISTER, reg) { }
- };
-
- Trace()
- : cp_offset_(0),
- actions_(nullptr),
- backtrack_(nullptr),
- stop_node_(nullptr),
- loop_label_(nullptr),
- characters_preloaded_(0),
- bound_checked_up_to_(0),
- flush_budget_(100),
- at_start_(UNKNOWN) {}
-
- // End the trace. This involves flushing the deferred actions in the trace
- // and pushing a backtrack location onto the backtrack stack. Once this is
- // done we can start a new trace or go to one that has already been
- // generated.
- void Flush(RegExpCompiler* compiler, RegExpNode* successor);
- int cp_offset() { return cp_offset_; }
- DeferredAction* actions() { return actions_; }
- // A trivial trace is one that has no deferred actions or other state that
- // affects the assumptions used when generating code. There is no recorded
- // backtrack location in a trivial trace, so with a trivial trace we will
- // generate code that, on a failure to match, gets the backtrack location
- // from the backtrack stack rather than using a direct jump instruction. We
- // always start code generation with a trivial trace and non-trivial traces
- // are created as we emit code for nodes or add to the list of deferred
- // actions in the trace. The location of the code generated for a node using
- // a trivial trace is recorded in a label in the node so that gotos can be
- // generated to that code.
- bool is_trivial() {
- return backtrack_ == nullptr && actions_ == nullptr && cp_offset_ == 0 &&
- characters_preloaded_ == 0 && bound_checked_up_to_ == 0 &&
- quick_check_performed_.characters() == 0 && at_start_ == UNKNOWN;
- }
- TriBool at_start() { return at_start_; }
- void set_at_start(TriBool at_start) { at_start_ = at_start; }
- Label* backtrack() { return backtrack_; }
- Label* loop_label() { return loop_label_; }
- RegExpNode* stop_node() { return stop_node_; }
- int characters_preloaded() { return characters_preloaded_; }
- int bound_checked_up_to() { return bound_checked_up_to_; }
- int flush_budget() { return flush_budget_; }
- QuickCheckDetails* quick_check_performed() { return &quick_check_performed_; }
- bool mentions_reg(int reg);
- // Returns true if a deferred position store exists to the specified
- // register and stores the offset in the out-parameter. Otherwise
- // returns false.
- bool GetStoredPosition(int reg, int* cp_offset);
- // These set methods and AdvanceCurrentPositionInTrace should be used only on
- // new traces - the intention is that traces are immutable after creation.
- void add_action(DeferredAction* new_action) {
- DCHECK(new_action->next_ == nullptr);
- new_action->next_ = actions_;
- actions_ = new_action;
- }
- void set_backtrack(Label* backtrack) { backtrack_ = backtrack; }
- void set_stop_node(RegExpNode* node) { stop_node_ = node; }
- void set_loop_label(Label* label) { loop_label_ = label; }
- void set_characters_preloaded(int count) { characters_preloaded_ = count; }
- void set_bound_checked_up_to(int to) { bound_checked_up_to_ = to; }
- void set_flush_budget(int to) { flush_budget_ = to; }
- void set_quick_check_performed(QuickCheckDetails* d) {
- quick_check_performed_ = *d;
- }
- void InvalidateCurrentCharacter();
- void AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler);
-
- private:
- int FindAffectedRegisters(OutSet* affected_registers, Zone* zone);
- void PerformDeferredActions(RegExpMacroAssembler* macro,
- int max_register,
- const OutSet& affected_registers,
- OutSet* registers_to_pop,
- OutSet* registers_to_clear,
- Zone* zone);
- void RestoreAffectedRegisters(RegExpMacroAssembler* macro,
- int max_register,
- const OutSet& registers_to_pop,
- const OutSet& registers_to_clear);
- int cp_offset_;
- DeferredAction* actions_;
- Label* backtrack_;
- RegExpNode* stop_node_;
- Label* loop_label_;
- int characters_preloaded_;
- int bound_checked_up_to_;
- QuickCheckDetails quick_check_performed_;
- int flush_budget_;
- TriBool at_start_;
-};
-
-
-class GreedyLoopState {
- public:
- explicit GreedyLoopState(bool not_at_start);
-
- Label* label() { return &label_; }
- Trace* counter_backtrack_trace() { return &counter_backtrack_trace_; }
-
- private:
- Label label_;
- Trace counter_backtrack_trace_;
-};
-
-
-struct PreloadState {
- static const int kEatsAtLeastNotYetInitialized = -1;
- bool preload_is_current_;
- bool preload_has_checked_bounds_;
- int preload_characters_;
- int eats_at_least_;
- void init() {
- eats_at_least_ = kEatsAtLeastNotYetInitialized;
- }
-};
-
-
-class NodeVisitor {
- public:
- virtual ~NodeVisitor() = default;
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that) = 0;
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- virtual void VisitLoopChoice(LoopChoiceNode* that) { VisitChoice(that); }
-};
-
-
-// Node visitor used to add the start set of the alternatives to the
-// dispatch table of a choice node.
-class V8_EXPORT_PRIVATE DispatchTableConstructor : public NodeVisitor {
- public:
- DispatchTableConstructor(DispatchTable* table, bool ignore_case,
- Zone* zone)
- : table_(table),
- choice_index_(-1),
- ignore_case_(ignore_case),
- zone_(zone) { }
-
- void BuildTable(ChoiceNode* node);
-
- void AddRange(CharacterRange range) {
- table()->AddRange(range, choice_index_, zone_);
- }
-
- void AddInverse(ZoneList<CharacterRange>* ranges);
-
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- DispatchTable* table() { return table_; }
- void set_choice_index(int value) { choice_index_ = value; }
-
- protected:
- DispatchTable* table_;
- int choice_index_;
- bool ignore_case_;
- Zone* zone_;
-};
-
-// Assertion propagation moves information about assertions such as
-// \b to the affected nodes. For instance, in /.\b./ the first '.' must be
-// told that whatever follows needs to know whether it matched a word or a
-// non-word character, and the second '.' must be told that it has to check
-// whether it comes after a word or a non-word character. In this case the
-// result will be something like:
-//
-// +-------+ +------------+
-// | . | | . |
-// +-------+ ---> +------------+
-// | word? | | check word |
-// +-------+ +------------+
-class Analysis: public NodeVisitor {
- public:
- Analysis(Isolate* isolate, bool is_one_byte)
- : isolate_(isolate), is_one_byte_(is_one_byte), error_message_(nullptr) {}
- void EnsureAnalyzed(RegExpNode* node);
-
-#define DECLARE_VISIT(Type) void Visit##Type(Type##Node* that) override;
- FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- void VisitLoopChoice(LoopChoiceNode* that) override;
-
- bool has_failed() { return error_message_ != nullptr; }
- const char* error_message() {
- DCHECK(error_message_ != nullptr);
- return error_message_;
- }
- void fail(const char* error_message) {
- error_message_ = error_message;
- }
-
- Isolate* isolate() const { return isolate_; }
-
- private:
- Isolate* isolate_;
- bool is_one_byte_;
- const char* error_message_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
-};
-
-
-struct RegExpCompileData {
- RegExpCompileData()
- : tree(nullptr),
- node(nullptr),
- simple(true),
- contains_anchor(false),
- capture_count(0) {}
- RegExpTree* tree;
- RegExpNode* node;
- bool simple;
- bool contains_anchor;
- Handle<FixedArray> capture_name_map;
- Handle<String> error;
- int capture_count;
-};
-
-
-class RegExpEngine: public AllStatic {
- public:
- struct CompilationResult {
- inline CompilationResult(Isolate* isolate, const char* error_message);
- CompilationResult(Object code, int registers)
- : code(code), num_registers(registers) {}
- const char* const error_message = nullptr;
- Object const code;
- int const num_registers = 0;
- };
-
- V8_EXPORT_PRIVATE static CompilationResult Compile(
- Isolate* isolate, Zone* zone, RegExpCompileData* input,
- JSRegExp::Flags flags, Handle<String> pattern,
- Handle<String> sample_subject, bool is_one_byte);
-
- static bool TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern);
-
- V8_EXPORT_PRIVATE static void DotPrint(const char* label, RegExpNode* node,
- bool ignore_case);
-};
-
-
-class RegExpResultsCache : public AllStatic {
- public:
- enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
-
- // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
- // On success, the returned result is guaranteed to be a COW-array.
- static Object Lookup(Heap* heap, String key_string, Object key_pattern,
- FixedArray* last_match_out, ResultsCacheType type);
- // Attempt to add value_array to the cache specified by type. On success,
- // value_array is turned into a COW-array.
- static void Enter(Isolate* isolate, Handle<String> key_string,
- Handle<Object> key_pattern, Handle<FixedArray> value_array,
- Handle<FixedArray> last_match_cache, ResultsCacheType type);
- static void Clear(FixedArray cache);
- static const int kRegExpResultsCacheSize = 0x100;
-
- private:
- static const int kArrayEntriesPerCacheEntry = 4;
- static const int kStringOffset = 0;
- static const int kPatternOffset = 1;
- static const int kArrayOffset = 2;
- static const int kLastMatchOffset = 3;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_REGEXP_JSREGEXP_H_
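The RegExpResultsCache removed above is a fixed-size, direct-mapped cache: each logical entry occupies four consecutive FixedArray slots (key string, key pattern, result array, last match), and there are 0x100 entries. A rough standalone sketch of that layout using plain C++ containers; the entry type, field names, and hash below are illustrative only, not V8's implementation:

#include <array>
#include <cstddef>
#include <functional>
#include <string>

// Four logical slots per entry, mirroring kArrayEntriesPerCacheEntry = 4.
struct CacheEntrySketch {
  std::string key_string;   // kStringOffset
  std::string key_pattern;  // kPatternOffset
  std::string value_array;  // kArrayOffset (a COW FixedArray in V8)
  std::string last_match;   // kLastMatchOffset
};

class ResultsCacheSketch {
 public:
  static constexpr std::size_t kSize = 0x100;  // kRegExpResultsCacheSize

  // The real cache returns Smi 0 on a miss and a COW array on a hit.
  const CacheEntrySketch* Lookup(const std::string& s,
                                 const std::string& p) const {
    const CacheEntrySketch& e = entries_[Index(s, p)];
    return (e.key_string == s && e.key_pattern == p) ? &e : nullptr;
  }

  void Enter(const std::string& s, const std::string& p, std::string value,
             std::string last_match) {
    entries_[Index(s, p)] = {s, p, std::move(value), std::move(last_match)};
  }

 private:
  static std::size_t Index(const std::string& s, const std::string& p) {
    // Slot selection is hash-based in V8; std::hash stands in here.
    return (std::hash<std::string>{}(s) ^ std::hash<std::string>{}(p)) % kSize;
  }

  std::array<CacheEntrySketch, kSize> entries_;
};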
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 1fa9f7a35b..aab67cad15 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -50,7 +50,7 @@ class RegExpVisitor {
// A simple closed interval.
class Interval {
public:
- Interval() : from_(kNone), to_(kNone) {}
+ Interval() : from_(kNone), to_(kNone - 1) {} // '- 1' for branchless size().
Interval(int from, int to) : from_(from), to_(to) {}
Interval Union(Interval that) {
if (that.from_ == kNone)
@@ -60,12 +60,16 @@ class Interval {
else
return Interval(Min(from_, that.from_), Max(to_, that.to_));
}
+
bool Contains(int value) { return (from_ <= value) && (value <= to_); }
bool is_empty() { return from_ == kNone; }
int from() const { return from_; }
int to() const { return to_; }
+ int size() const { return to_ - from_ + 1; }
+
static Interval Empty() { return Interval(); }
- static const int kNone = -1;
+
+ static constexpr int kNone = -1;
private:
int from_;
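The '- 1' in the new default constructor is what lets size() above avoid a branch: an empty interval is stored as (kNone, kNone - 1), so to_ - from_ + 1 is 0 for the empty case and the inclusive length otherwise. A minimal standalone check of that arithmetic (a sketch, not V8's Interval class):

#include <cassert>

// Sketch of the branchless size() trick; not V8's Interval class.
struct IntervalSketch {
  static constexpr int kNone = -1;
  int from_ = kNone;
  int to_ = kNone - 1;  // Empty interval: size() is 0 with no special case.
  int size() const { return to_ - from_ + 1; }
};

int main() {
  IntervalSketch empty;
  assert(empty.size() == 0);  // (kNone - 1) - kNone + 1 == 0.

  IntervalSketch r;
  r.from_ = 3;
  r.to_ = 7;
  assert(r.size() == 5);  // Inclusive on both ends: {3, 4, 5, 6, 7}.
  return 0;
}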
@@ -268,12 +272,13 @@ class RegExpAlternative final : public RegExpTree {
class RegExpAssertion final : public RegExpTree {
public:
enum AssertionType {
- START_OF_LINE,
- START_OF_INPUT,
- END_OF_LINE,
- END_OF_INPUT,
- BOUNDARY,
- NON_BOUNDARY
+ START_OF_LINE = 0,
+ START_OF_INPUT = 1,
+ END_OF_LINE = 2,
+ END_OF_INPUT = 3,
+ BOUNDARY = 4,
+ NON_BOUNDARY = 5,
+ LAST_TYPE = NON_BOUNDARY,
};
RegExpAssertion(AssertionType type, JSRegExp::Flags flags)
: assertion_type_(type), flags_(flags) {}
@@ -285,7 +290,8 @@ class RegExpAssertion final : public RegExpTree {
bool IsAnchoredAtEnd() override;
int min_match() override { return 0; }
int max_match() override { return 0; }
- AssertionType assertion_type() { return assertion_type_; }
+ AssertionType assertion_type() const { return assertion_type_; }
+ JSRegExp::Flags flags() const { return flags_; }
private:
const AssertionType assertion_type_;
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
index cda48aa00b..bd906fea15 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
@@ -2,30 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
+#ifndef V8_REGEXP_REGEXP_BYTECODE_GENERATOR_INL_H_
+#define V8_REGEXP_REGEXP_BYTECODE_GENERATOR_INL_H_
-#include "src/regexp/regexp-macro-assembler-irregexp.h"
+#include "src/regexp/regexp-bytecode-generator.h"
#include "src/ast/ast.h"
-#include "src/regexp/bytecodes-irregexp.h"
+#include "src/regexp/regexp-bytecodes.h"
namespace v8 {
namespace internal {
-void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
- uint32_t twenty_four_bits) {
+void RegExpBytecodeGenerator::Emit(uint32_t byte, uint32_t twenty_four_bits) {
uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte);
DCHECK(pc_ <= buffer_.length());
- if (pc_ + 3 >= buffer_.length()) {
+ if (pc_ + 3 >= buffer_.length()) {
Expand();
}
*reinterpret_cast<uint32_t*>(buffer_.begin() + pc_) = word;
pc_ += 4;
}
-
-void RegExpMacroAssemblerIrregexp::Emit16(uint32_t word) {
+void RegExpBytecodeGenerator::Emit16(uint32_t word) {
DCHECK(pc_ <= buffer_.length());
if (pc_ + 1 >= buffer_.length()) {
Expand();
@@ -34,8 +32,7 @@ void RegExpMacroAssemblerIrregexp::Emit16(uint32_t word) {
pc_ += 2;
}
-
-void RegExpMacroAssemblerIrregexp::Emit8(uint32_t word) {
+void RegExpBytecodeGenerator::Emit8(uint32_t word) {
DCHECK(pc_ <= buffer_.length());
if (pc_ == buffer_.length()) {
Expand();
@@ -44,8 +41,7 @@ void RegExpMacroAssemblerIrregexp::Emit8(uint32_t word) {
pc_ += 1;
}
-
-void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
+void RegExpBytecodeGenerator::Emit32(uint32_t word) {
DCHECK(pc_ <= buffer_.length());
if (pc_ + 3 >= buffer_.length()) {
Expand();
@@ -57,4 +53,4 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
} // namespace internal
} // namespace v8
-#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
+#endif // V8_REGEXP_REGEXP_BYTECODE_GENERATOR_INL_H_
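All of the Emit() overloads above write into the same growable byte buffer; the plain Emit(byte, arg) form packs the bytecode into the low 8 bits of a 32-bit word (BYTECODE_MASK = 0xff) and the 24-bit first argument into the upper bits (BYTECODE_SHIFT = 8). A small sketch of that encoding and its decode, independent of the generator itself (the sample opcode value is made up):

#include <cassert>
#include <cstdint>

// Constants as in regexp-bytecodes.h.
constexpr uint32_t kBytecodeMask = 0xff;  // BYTECODE_MASK
constexpr int kBytecodeShift = 8;         // BYTECODE_SHIFT

// Pack a bytecode and its 24-bit first argument into one word, as Emit() does.
constexpr uint32_t Pack(uint32_t bytecode, uint32_t arg24) {
  return (arg24 << kBytecodeShift) | bytecode;
}
constexpr uint32_t Bytecode(uint32_t word) { return word & kBytecodeMask; }
constexpr uint32_t Argument(uint32_t word) { return word >> kBytecodeShift; }

int main() {
  // Hypothetical opcode 0x2a with register index 3 as its argument.
  constexpr uint32_t word = Pack(0x2a, 3);
  static_assert(Bytecode(word) == 0x2a, "low byte holds the bytecode");
  static_assert(Argument(word) == 3, "upper 24 bits hold the argument");
  assert(Bytecode(word) == 0x2a);
  return 0;
}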
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index 712f00e509..ee3b4015d5 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -2,39 +2,35 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/regexp/regexp-macro-assembler-irregexp.h"
+#include "src/regexp/regexp-bytecode-generator.h"
#include "src/ast/ast.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/bytecodes-irregexp.h"
-#include "src/regexp/regexp-macro-assembler-irregexp-inl.h"
+#include "src/regexp/regexp-bytecode-generator-inl.h"
+#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
-RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Isolate* isolate,
- Zone* zone)
+RegExpBytecodeGenerator::RegExpBytecodeGenerator(Isolate* isolate, Zone* zone)
: RegExpMacroAssembler(isolate, zone),
buffer_(Vector<byte>::New(1024)),
pc_(0),
- own_buffer_(true),
advance_current_end_(kInvalidPC),
isolate_(isolate) {}
-RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
+RegExpBytecodeGenerator::~RegExpBytecodeGenerator() {
if (backtrack_.is_linked()) backtrack_.Unuse();
- if (own_buffer_) buffer_.Dispose();
+ buffer_.Dispose();
}
-
-RegExpMacroAssemblerIrregexp::IrregexpImplementation
-RegExpMacroAssemblerIrregexp::Implementation() {
+RegExpBytecodeGenerator::IrregexpImplementation
+RegExpBytecodeGenerator::Implementation() {
return kBytecodeImplementation;
}
-
-void RegExpMacroAssemblerIrregexp::Bind(Label* l) {
+void RegExpBytecodeGenerator::Bind(Label* l) {
advance_current_end_ = kInvalidPC;
DCHECK(!l->is_bound());
if (l->is_linked()) {
@@ -48,8 +44,7 @@ void RegExpMacroAssemblerIrregexp::Bind(Label* l) {
l->bind_to(pc_);
}
-
-void RegExpMacroAssemblerIrregexp::EmitOrLink(Label* l) {
+void RegExpBytecodeGenerator::EmitOrLink(Label* l) {
if (l == nullptr) l = &backtrack_;
if (l->is_bound()) {
Emit32(l->pos());
@@ -63,102 +58,79 @@ void RegExpMacroAssemblerIrregexp::EmitOrLink(Label* l) {
}
}
-
-void RegExpMacroAssemblerIrregexp::PopRegister(int register_index) {
+void RegExpBytecodeGenerator::PopRegister(int register_index) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_POP_REGISTER, register_index);
}
-
-void RegExpMacroAssemblerIrregexp::PushRegister(
- int register_index,
- StackCheckFlag check_stack_limit) {
+void RegExpBytecodeGenerator::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_PUSH_REGISTER, register_index);
}
-
-void RegExpMacroAssemblerIrregexp::WriteCurrentPositionToRegister(
- int register_index, int cp_offset) {
+void RegExpBytecodeGenerator::WriteCurrentPositionToRegister(int register_index,
+ int cp_offset) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_REGISTER_TO_CP, register_index);
Emit32(cp_offset); // Current position offset.
}
-
-void RegExpMacroAssemblerIrregexp::ClearRegisters(int reg_from, int reg_to) {
+void RegExpBytecodeGenerator::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
for (int reg = reg_from; reg <= reg_to; reg++) {
SetRegister(reg, -1);
}
}
-
-void RegExpMacroAssemblerIrregexp::ReadCurrentPositionFromRegister(
+void RegExpBytecodeGenerator::ReadCurrentPositionFromRegister(
int register_index) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_CP_TO_REGISTER, register_index);
}
-
-void RegExpMacroAssemblerIrregexp::WriteStackPointerToRegister(
- int register_index) {
+void RegExpBytecodeGenerator::WriteStackPointerToRegister(int register_index) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_REGISTER_TO_SP, register_index);
}
-
-void RegExpMacroAssemblerIrregexp::ReadStackPointerFromRegister(
- int register_index) {
+void RegExpBytecodeGenerator::ReadStackPointerFromRegister(int register_index) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_SP_TO_REGISTER, register_index);
}
-
-void RegExpMacroAssemblerIrregexp::SetCurrentPositionFromEnd(int by) {
+void RegExpBytecodeGenerator::SetCurrentPositionFromEnd(int by) {
DCHECK(is_uint24(by));
Emit(BC_SET_CURRENT_POSITION_FROM_END, by);
}
-
-void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) {
+void RegExpBytecodeGenerator::SetRegister(int register_index, int to) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_REGISTER, register_index);
Emit32(to);
}
-
-void RegExpMacroAssemblerIrregexp::AdvanceRegister(int register_index, int by) {
+void RegExpBytecodeGenerator::AdvanceRegister(int register_index, int by) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_ADVANCE_REGISTER, register_index);
Emit32(by);
}
+void RegExpBytecodeGenerator::PopCurrentPosition() { Emit(BC_POP_CP, 0); }
-void RegExpMacroAssemblerIrregexp::PopCurrentPosition() {
- Emit(BC_POP_CP, 0);
-}
+void RegExpBytecodeGenerator::PushCurrentPosition() { Emit(BC_PUSH_CP, 0); }
+void RegExpBytecodeGenerator::Backtrack() { Emit(BC_POP_BT, 0); }
-void RegExpMacroAssemblerIrregexp::PushCurrentPosition() {
- Emit(BC_PUSH_CP, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::Backtrack() {
- Emit(BC_POP_BT, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::GoTo(Label* l) {
+void RegExpBytecodeGenerator::GoTo(Label* l) {
if (advance_current_end_ == pc_) {
// Combine advance current and goto.
pc_ = advance_current_start_;
@@ -172,25 +144,19 @@ void RegExpMacroAssemblerIrregexp::GoTo(Label* l) {
}
}
-
-void RegExpMacroAssemblerIrregexp::PushBacktrack(Label* l) {
+void RegExpBytecodeGenerator::PushBacktrack(Label* l) {
Emit(BC_PUSH_BT, 0);
EmitOrLink(l);
}
-
-bool RegExpMacroAssemblerIrregexp::Succeed() {
+bool RegExpBytecodeGenerator::Succeed() {
Emit(BC_SUCCEED, 0);
return false; // Restart matching for global regexp not supported.
}
+void RegExpBytecodeGenerator::Fail() { Emit(BC_FAIL, 0); }
-void RegExpMacroAssemblerIrregexp::Fail() {
- Emit(BC_FAIL, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::AdvanceCurrentPosition(int by) {
+void RegExpBytecodeGenerator::AdvanceCurrentPosition(int by) {
DCHECK_LE(kMinCPOffset, by);
DCHECK_GE(kMaxCPOffset, by);
advance_current_start_ = pc_;
@@ -199,18 +165,16 @@ void RegExpMacroAssemblerIrregexp::AdvanceCurrentPosition(int by) {
advance_current_end_ = pc_;
}
-
-void RegExpMacroAssemblerIrregexp::CheckGreedyLoop(
- Label* on_tos_equals_current_position) {
+void RegExpBytecodeGenerator::CheckGreedyLoop(
+ Label* on_tos_equals_current_position) {
Emit(BC_CHECK_GREEDY, 0);
EmitOrLink(on_tos_equals_current_position);
}
-
-void RegExpMacroAssemblerIrregexp::LoadCurrentCharacter(int cp_offset,
- Label* on_failure,
- bool check_bounds,
- int characters) {
+void RegExpBytecodeGenerator::LoadCurrentCharacter(int cp_offset,
+ Label* on_failure,
+ bool check_bounds,
+ int characters) {
DCHECK_LE(kMinCPOffset, cp_offset);
DCHECK_GE(kMaxCPOffset, cp_offset);
int bytecode;
@@ -237,22 +201,17 @@ void RegExpMacroAssemblerIrregexp::LoadCurrentCharacter(int cp_offset,
if (check_bounds) EmitOrLink(on_failure);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterLT(uc16 limit,
- Label* on_less) {
+void RegExpBytecodeGenerator::CheckCharacterLT(uc16 limit, Label* on_less) {
Emit(BC_CHECK_LT, limit);
EmitOrLink(on_less);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterGT(uc16 limit,
- Label* on_greater) {
+void RegExpBytecodeGenerator::CheckCharacterGT(uc16 limit, Label* on_greater) {
Emit(BC_CHECK_GT, limit);
EmitOrLink(on_greater);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacter(uint32_t c, Label* on_equal) {
+void RegExpBytecodeGenerator::CheckCharacter(uint32_t c, Label* on_equal) {
if (c > MAX_FIRST_ARG) {
Emit(BC_CHECK_4_CHARS, 0);
Emit32(c);
@@ -262,22 +221,19 @@ void RegExpMacroAssemblerIrregexp::CheckCharacter(uint32_t c, Label* on_equal) {
EmitOrLink(on_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckAtStart(Label* on_at_start) {
+void RegExpBytecodeGenerator::CheckAtStart(Label* on_at_start) {
Emit(BC_CHECK_AT_START, 0);
EmitOrLink(on_at_start);
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotAtStart(int cp_offset,
- Label* on_not_at_start) {
+void RegExpBytecodeGenerator::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
Emit(BC_CHECK_NOT_AT_START, cp_offset);
EmitOrLink(on_not_at_start);
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
+void RegExpBytecodeGenerator::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
if (c > MAX_FIRST_ARG) {
Emit(BC_CHECK_NOT_4_CHARS, 0);
Emit32(c);
@@ -287,11 +243,8 @@ void RegExpMacroAssemblerIrregexp::CheckNotCharacter(uint32_t c,
EmitOrLink(on_not_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterAfterAnd(
- uint32_t c,
- uint32_t mask,
- Label* on_equal) {
+void RegExpBytecodeGenerator::CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_equal) {
if (c > MAX_FIRST_ARG) {
Emit(BC_AND_CHECK_4_CHARS, 0);
Emit32(c);
@@ -302,11 +255,9 @@ void RegExpMacroAssemblerIrregexp::CheckCharacterAfterAnd(
EmitOrLink(on_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterAnd(
- uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
+void RegExpBytecodeGenerator::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
if (c > MAX_FIRST_ARG) {
Emit(BC_AND_CHECK_NOT_4_CHARS, 0);
Emit32(c);
@@ -317,43 +268,32 @@ void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterAnd(
EmitOrLink(on_not_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
+void RegExpBytecodeGenerator::CheckNotCharacterAfterMinusAnd(
+ uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) {
Emit(BC_MINUS_AND_CHECK_NOT_CHAR, c);
Emit16(minus);
Emit16(mask);
EmitOrLink(on_not_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
+void RegExpBytecodeGenerator::CheckCharacterInRange(uc16 from, uc16 to,
+ Label* on_in_range) {
Emit(BC_CHECK_CHAR_IN_RANGE, 0);
Emit16(from);
Emit16(to);
EmitOrLink(on_in_range);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
+void RegExpBytecodeGenerator::CheckCharacterNotInRange(uc16 from, uc16 to,
+ Label* on_not_in_range) {
Emit(BC_CHECK_CHAR_NOT_IN_RANGE, 0);
Emit16(from);
Emit16(to);
EmitOrLink(on_not_in_range);
}
-
-void RegExpMacroAssemblerIrregexp::CheckBitInTable(
- Handle<ByteArray> table, Label* on_bit_set) {
+void RegExpBytecodeGenerator::CheckBitInTable(Handle<ByteArray> table,
+ Label* on_bit_set) {
Emit(BC_CHECK_BIT_IN_TABLE, 0);
EmitOrLink(on_bit_set);
for (int i = 0; i < kTableSize; i += kBitsPerByte) {
@@ -365,10 +305,9 @@ void RegExpMacroAssemblerIrregexp::CheckBitInTable(
}
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
- bool read_backward,
- Label* on_not_equal) {
+void RegExpBytecodeGenerator::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_not_equal) {
DCHECK_LE(0, start_reg);
DCHECK_GE(kMaxRegister, start_reg);
Emit(read_backward ? BC_CHECK_NOT_BACK_REF_BACKWARD : BC_CHECK_NOT_BACK_REF,
@@ -376,8 +315,7 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
EmitOrLink(on_not_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
+void RegExpBytecodeGenerator::CheckNotBackReferenceIgnoreCase(
int start_reg, bool read_backward, bool unicode, Label* on_not_equal) {
DCHECK_LE(0, start_reg);
DCHECK_GE(kMaxRegister, start_reg);
@@ -389,10 +327,8 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
EmitOrLink(on_not_equal);
}
-
-void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
- int comparand,
- Label* on_less_than) {
+void RegExpBytecodeGenerator::IfRegisterLT(int register_index, int comparand,
+ Label* on_less_than) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_CHECK_REGISTER_LT, register_index);
@@ -400,10 +336,8 @@ void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
EmitOrLink(on_less_than);
}
-
-void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index,
- int comparand,
- Label* on_greater_or_equal) {
+void RegExpBytecodeGenerator::IfRegisterGE(int register_index, int comparand,
+ Label* on_greater_or_equal) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_CHECK_REGISTER_GE, register_index);
@@ -411,18 +345,15 @@ void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index,
EmitOrLink(on_greater_or_equal);
}
-
-void RegExpMacroAssemblerIrregexp::IfRegisterEqPos(int register_index,
- Label* on_eq) {
+void RegExpBytecodeGenerator::IfRegisterEqPos(int register_index,
+ Label* on_eq) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_CHECK_REGISTER_EQ_POS, register_index);
EmitOrLink(on_eq);
}
-
-Handle<HeapObject> RegExpMacroAssemblerIrregexp::GetCode(
- Handle<String> source) {
+Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) {
Bind(&backtrack_);
Emit(BC_POP_BT, 0);
Handle<ByteArray> array = isolate_->factory()->NewByteArray(length());
@@ -430,25 +361,17 @@ Handle<HeapObject> RegExpMacroAssemblerIrregexp::GetCode(
return array;
}
+int RegExpBytecodeGenerator::length() { return pc_; }
-int RegExpMacroAssemblerIrregexp::length() {
- return pc_;
-}
-
-void RegExpMacroAssemblerIrregexp::Copy(byte* a) {
+void RegExpBytecodeGenerator::Copy(byte* a) {
MemCopy(a, buffer_.begin(), length());
}
-
-void RegExpMacroAssemblerIrregexp::Expand() {
- bool old_buffer_was_our_own = own_buffer_;
+void RegExpBytecodeGenerator::Expand() {
Vector<byte> old_buffer = buffer_;
buffer_ = Vector<byte>::New(old_buffer.length() * 2);
- own_buffer_ = true;
MemCopy(buffer_.begin(), old_buffer.begin(), old_buffer.length());
- if (old_buffer_was_our_own) {
- old_buffer.Dispose();
- }
+ old_buffer.Dispose();
}
} // namespace internal
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index 9e17dca415..b7207e977c 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
-#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+#ifndef V8_REGEXP_REGEXP_BYTECODE_GENERATOR_H_
+#define V8_REGEXP_REGEXP_BYTECODE_GENERATOR_H_
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
-// A light-weight assembler for the Irregexp byte code.
-class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
- : public RegExpMacroAssembler {
+// An assembler/generator for the Irregexp byte code.
+class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -22,8 +21,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
// The assembler allocates and grows its own buffer, and buffer_size
// determines the initial buffer size. The buffer is owned by the assembler
// and deallocated upon destruction of the assembler.
- RegExpMacroAssemblerIrregexp(Isolate* isolate, Zone* zone);
- virtual ~RegExpMacroAssemblerIrregexp();
+ RegExpBytecodeGenerator(Isolate* isolate, Zone* zone);
+ virtual ~RegExpBytecodeGenerator();
// The byte-code interpreter checks on each push anyway.
virtual int stack_limit_slack() { return 1; }
virtual bool CanReadUnaligned() { return false; }
@@ -47,13 +46,11 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void WriteStackPointerToRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
+ virtual void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned mask,
+ virtual void CheckCharacterAfterAnd(unsigned c, unsigned mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
@@ -61,18 +58,12 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
+ virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
+ virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from, uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
virtual void CheckNotBackReference(int start_reg, bool read_backward,
@@ -103,8 +94,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
Vector<byte> buffer_;
// The program counter.
int pc_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
Label backtrack_;
int advance_current_start_;
@@ -115,10 +104,10 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
static const int kInvalidPC = -1;
- DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpBytecodeGenerator);
};
} // namespace internal
} // namespace v8
-#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+#endif // V8_REGEXP_REGEXP_BYTECODE_GENERATOR_H_
diff --git a/deps/v8/src/regexp/bytecodes-irregexp.h b/deps/v8/src/regexp/regexp-bytecodes.h
index a27c9a0a2b..8b1468c1bf 100644
--- a/deps/v8/src/regexp/bytecodes-irregexp.h
+++ b/deps/v8/src/regexp/regexp-bytecodes.h
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-#ifndef V8_REGEXP_BYTECODES_IRREGEXP_H_
-#define V8_REGEXP_BYTECODES_IRREGEXP_H_
+#ifndef V8_REGEXP_REGEXP_BYTECODES_H_
+#define V8_REGEXP_REGEXP_BYTECODES_H_
namespace v8 {
namespace internal {
-
const int BYTECODE_MASK = 0xff;
// The first argument is packed in with the byte code in one word, but so it
// has 24 bits, but it can be positive and negative so only use 23 bits for
@@ -71,8 +69,7 @@ const int BYTECODE_SHIFT = 8;
V(ADVANCE_CP_AND_GOTO, 50, 8) /* bc8 offset24 addr32 */ \
V(SET_CURRENT_POSITION_FROM_END, 51, 4) /* bc8 idx24 */
-#define DECLARE_BYTECODES(name, code, length) \
- static const int BC_##name = code;
+#define DECLARE_BYTECODES(name, code, length) static const int BC_##name = code;
BYTECODE_ITERATOR(DECLARE_BYTECODES)
#undef DECLARE_BYTECODES
@@ -84,4 +81,4 @@ BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
} // namespace internal
} // namespace v8
-#endif // V8_REGEXP_BYTECODES_IRREGEXP_H_
+#endif // V8_REGEXP_REGEXP_BYTECODES_H_
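The renamed header keeps each bytecode's name, opcode, and encoded length in a single X-macro list (BYTECODE_ITERATOR) and derives both the BC_* opcode constants and the BC_*_LENGTH constants from it, so the three stay in sync. A compilable sketch of the same pattern, reusing two entries visible in the hunk above (the list name here is made up):

#include <cstdio>

// Two entries copied from BYTECODE_ITERATOR above; DEMO_BYTECODE_LIST is ours.
#define DEMO_BYTECODE_LIST(V)                                       \
  V(ADVANCE_CP_AND_GOTO, 50, 8)           /* bc8 offset24 addr32 */ \
  V(SET_CURRENT_POSITION_FROM_END, 51, 4) /* bc8 idx24 */

#define DECLARE_BYTECODES(name, code, length) static const int BC_##name = code;
DEMO_BYTECODE_LIST(DECLARE_BYTECODES)
#undef DECLARE_BYTECODES

#define DECLARE_BYTECODE_LENGTH(name, code, length) \
  static const int BC_##name##_LENGTH = length;
DEMO_BYTECODE_LIST(DECLARE_BYTECODE_LENGTH)
#undef DECLARE_BYTECODE_LENGTH

int main() {
  std::printf("ADVANCE_CP_AND_GOTO: opcode %d, %d bytes\n",
              BC_ADVANCE_CP_AND_GOTO, BC_ADVANCE_CP_AND_GOTO_LENGTH);
  return 0;
}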
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
new file mode 100644
index 0000000000..d12c35682e
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -0,0 +1,1678 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-compiler.h"
+
+#include "src/execution/isolate.h"
+#include "src/regexp/regexp.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/regexp/special-case.h"
+#endif // V8_INTL_SUPPORT
+#include "src/strings/unicode-inl.h"
+#include "src/zone/zone-list-inl.h"
+
+#ifdef V8_INTL_SUPPORT
+#include "unicode/locid.h"
+#include "unicode/uniset.h"
+#include "unicode/utypes.h"
+#endif // V8_INTL_SUPPORT
+
+namespace v8 {
+namespace internal {
+
+using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
+
+// -------------------------------------------------------------------
+// Tree to graph conversion
+
+RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ ZoneList<TextElement>* elms =
+ new (compiler->zone()) ZoneList<TextElement>(1, compiler->zone());
+ elms->Add(TextElement::Atom(this), compiler->zone());
+ return new (compiler->zone())
+ TextNode(elms, compiler->read_backward(), on_success);
+}
+
+RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ return new (compiler->zone())
+ TextNode(elements(), compiler->read_backward(), on_success);
+}
+
+static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
+ const int* special_class, int length) {
+ length--; // Remove final marker.
+ DCHECK_EQ(kRangeEndMarker, special_class[length]);
+ DCHECK_NE(0, ranges->length());
+ DCHECK_NE(0, length);
+ DCHECK_NE(0, special_class[0]);
+ if (ranges->length() != (length >> 1) + 1) {
+ return false;
+ }
+ CharacterRange range = ranges->at(0);
+ if (range.from() != 0) {
+ return false;
+ }
+ for (int i = 0; i < length; i += 2) {
+ if (special_class[i] != (range.to() + 1)) {
+ return false;
+ }
+ range = ranges->at((i >> 1) + 1);
+ if (special_class[i + 1] != range.from()) {
+ return false;
+ }
+ }
+ if (range.to() != String::kMaxCodePoint) {
+ return false;
+ }
+ return true;
+}
+
+static bool CompareRanges(ZoneList<CharacterRange>* ranges,
+ const int* special_class, int length) {
+ length--; // Remove final marker.
+ DCHECK_EQ(kRangeEndMarker, special_class[length]);
+ if (ranges->length() * 2 != length) {
+ return false;
+ }
+ for (int i = 0; i < length; i += 2) {
+ CharacterRange range = ranges->at(i >> 1);
+ if (range.from() != special_class[i] ||
+ range.to() != special_class[i + 1] - 1) {
+ return false;
+ }
+ }
+ return true;
+}
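Both comparison helpers above read the special-class tables as a flat list of half-open boundaries: (start, one-past-end) pairs terminated by kRangeEndMarker, which is why the checks compare against special_class[i + 1] - 1. A small sketch that expands such a list into the inclusive ranges CharacterRange uses; the \w-style values here are assumed for illustration, the real kWordRanges lives elsewhere in the regexp sources:

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

// Assumed boundary list in the same encoding: half-open pairs plus a marker.
constexpr int kRangeEndMarker = 0x10000;
constexpr int kDemoWordRanges[] = {'0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1,
                                   'a', 'z' + 1, kRangeEndMarker};

int main() {
  constexpr std::size_t n = sizeof(kDemoWordRanges) / sizeof(int);
  std::vector<std::pair<int, int>> ranges;
  for (std::size_t i = 0; i + 1 < n; i += 2) {
    // Each pair [start, one-past-end) becomes an inclusive CharacterRange.
    ranges.emplace_back(kDemoWordRanges[i], kDemoWordRanges[i + 1] - 1);
  }
  for (const auto& r : ranges) {
    std::printf("[%c-%c]\n", r.first, r.second);  // [0-9] [A-Z] [_-_] [a-z]
  }
  return 0;
}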
+
+bool RegExpCharacterClass::is_standard(Zone* zone) {
+ // TODO(lrn): Remove need for this function, by not throwing away information
+ // along the way.
+ if (is_negated()) {
+ return false;
+ }
+ if (set_.is_standard()) {
+ return true;
+ }
+ if (CompareRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
+ set_.set_standard_set_type('s');
+ return true;
+ }
+ if (CompareInverseRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
+ set_.set_standard_set_type('S');
+ return true;
+ }
+ if (CompareInverseRanges(set_.ranges(zone), kLineTerminatorRanges,
+ kLineTerminatorRangeCount)) {
+ set_.set_standard_set_type('.');
+ return true;
+ }
+ if (CompareRanges(set_.ranges(zone), kLineTerminatorRanges,
+ kLineTerminatorRangeCount)) {
+ set_.set_standard_set_type('n');
+ return true;
+ }
+ if (CompareRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
+ set_.set_standard_set_type('w');
+ return true;
+ }
+ if (CompareInverseRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
+ set_.set_standard_set_type('W');
+ return true;
+ }
+ return false;
+}
+
+UnicodeRangeSplitter::UnicodeRangeSplitter(ZoneList<CharacterRange>* base) {
+ // The unicode range splitter categorizes given character ranges into:
+ // - Code points from the BMP representable by one code unit.
+ // - Code points outside the BMP that need to be split into surrogate pairs.
+ // - Lone lead surrogates.
+ // - Lone trail surrogates.
+  // Lone surrogates are valid code points, though not actual characters.

+ // They require special matching to make sure we do not split surrogate pairs.
+
+ for (int i = 0; i < base->length(); i++) AddRange(base->at(i));
+}
+
+void UnicodeRangeSplitter::AddRange(CharacterRange range) {
+ static constexpr uc32 kBmp1Start = 0;
+ static constexpr uc32 kBmp1End = kLeadSurrogateStart - 1;
+ static constexpr uc32 kBmp2Start = kTrailSurrogateEnd + 1;
+ static constexpr uc32 kBmp2End = kNonBmpStart - 1;
+
+ // Ends are all inclusive.
+ STATIC_ASSERT(kBmp1Start == 0);
+ STATIC_ASSERT(kBmp1Start < kBmp1End);
+ STATIC_ASSERT(kBmp1End + 1 == kLeadSurrogateStart);
+ STATIC_ASSERT(kLeadSurrogateStart < kLeadSurrogateEnd);
+ STATIC_ASSERT(kLeadSurrogateEnd + 1 == kTrailSurrogateStart);
+ STATIC_ASSERT(kTrailSurrogateStart < kTrailSurrogateEnd);
+ STATIC_ASSERT(kTrailSurrogateEnd + 1 == kBmp2Start);
+ STATIC_ASSERT(kBmp2Start < kBmp2End);
+ STATIC_ASSERT(kBmp2End + 1 == kNonBmpStart);
+ STATIC_ASSERT(kNonBmpStart < kNonBmpEnd);
+
+ static constexpr uc32 kStarts[] = {
+ kBmp1Start, kLeadSurrogateStart, kTrailSurrogateStart,
+ kBmp2Start, kNonBmpStart,
+ };
+
+ static constexpr uc32 kEnds[] = {
+ kBmp1End, kLeadSurrogateEnd, kTrailSurrogateEnd, kBmp2End, kNonBmpEnd,
+ };
+
+ CharacterRangeVector* const kTargets[] = {
+ &bmp_, &lead_surrogates_, &trail_surrogates_, &bmp_, &non_bmp_,
+ };
+
+ static constexpr int kCount = arraysize(kStarts);
+ STATIC_ASSERT(kCount == arraysize(kEnds));
+ STATIC_ASSERT(kCount == arraysize(kTargets));
+
+ for (int i = 0; i < kCount; i++) {
+ if (kStarts[i] > range.to()) break;
+ const uc32 from = std::max(kStarts[i], range.from());
+ const uc32 to = std::min(kEnds[i], range.to());
+ if (from > to) continue;
+ kTargets[i]->emplace_back(CharacterRange::Range(from, to));
+ }
+}
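AddRange() clips each incoming range against five adjacent, fixed sub-ranges and routes the clipped pieces to the bmp_, lead_surrogates_, trail_surrogates_ and non_bmp_ buckets. A standalone sketch of the same clipping loop, using the standard UTF-16 boundary values (assumed to match V8's constants of the same names):

#include <algorithm>
#include <cstdio>

// Standard UTF-16 boundaries; assumed to match V8's constants of the same name.
constexpr unsigned kLeadSurrogateStart = 0xD800, kLeadSurrogateEnd = 0xDBFF;
constexpr unsigned kTrailSurrogateStart = 0xDC00, kTrailSurrogateEnd = 0xDFFF;
constexpr unsigned kNonBmpStart = 0x10000, kNonBmpEnd = 0x10FFFF;

int main() {
  const unsigned kStarts[] = {0, kLeadSurrogateStart, kTrailSurrogateStart,
                              kTrailSurrogateEnd + 1, kNonBmpStart};
  const unsigned kEnds[] = {kLeadSurrogateStart - 1, kLeadSurrogateEnd,
                            kTrailSurrogateEnd, kNonBmpStart - 1, kNonBmpEnd};
  const char* kBuckets[] = {"bmp", "lead_surrogates", "trail_surrogates", "bmp",
                            "non_bmp"};

  // Example range spanning all five buckets: [U+0061, U+10400].
  const unsigned from = 0x61, to = 0x10400;
  for (int i = 0; i < 5; i++) {
    if (kStarts[i] > to) break;
    const unsigned lo = std::max(kStarts[i], from);
    const unsigned hi = std::min(kEnds[i], to);
    if (lo > hi) continue;
    std::printf("%-16s <- [U+%04X, U+%04X]\n", kBuckets[i], lo, hi);
  }
  return 0;
}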
+
+namespace {
+
+// Translates between new and old V8-isms (SmallVector, ZoneList).
+ZoneList<CharacterRange>* ToCanonicalZoneList(
+ const UnicodeRangeSplitter::CharacterRangeVector* v, Zone* zone) {
+ if (v->empty()) return nullptr;
+
+ ZoneList<CharacterRange>* result =
+ new (zone) ZoneList<CharacterRange>(static_cast<int>(v->size()), zone);
+ for (size_t i = 0; i < v->size(); i++) {
+ result->Add(v->at(i), zone);
+ }
+
+ CharacterRange::Canonicalize(result);
+ return result;
+}
+
+void AddBmpCharacters(RegExpCompiler* compiler, ChoiceNode* result,
+ RegExpNode* on_success, UnicodeRangeSplitter* splitter) {
+ ZoneList<CharacterRange>* bmp =
+ ToCanonicalZoneList(splitter->bmp(), compiler->zone());
+ if (bmp == nullptr) return;
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ result->AddAlternative(GuardedAlternative(TextNode::CreateForCharacterRanges(
+ compiler->zone(), bmp, compiler->read_backward(), on_success,
+ default_flags)));
+}
+
+void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
+ RegExpNode* on_success,
+ UnicodeRangeSplitter* splitter) {
+ ZoneList<CharacterRange>* non_bmp =
+ ToCanonicalZoneList(splitter->non_bmp(), compiler->zone());
+ if (non_bmp == nullptr) return;
+ DCHECK(!compiler->one_byte());
+ Zone* zone = compiler->zone();
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ CharacterRange::Canonicalize(non_bmp);
+ for (int i = 0; i < non_bmp->length(); i++) {
+ // Match surrogate pair.
+ // E.g. [\u10005-\u11005] becomes
+ // \ud800[\udc05-\udfff]|
+ // [\ud801-\ud803][\udc00-\udfff]|
+ // \ud804[\udc00-\udc05]
+ uc32 from = non_bmp->at(i).from();
+ uc32 to = non_bmp->at(i).to();
+ uc16 from_l = unibrow::Utf16::LeadSurrogate(from);
+ uc16 from_t = unibrow::Utf16::TrailSurrogate(from);
+ uc16 to_l = unibrow::Utf16::LeadSurrogate(to);
+ uc16 to_t = unibrow::Utf16::TrailSurrogate(to);
+ if (from_l == to_l) {
+ // The lead surrogate is the same.
+ result->AddAlternative(
+ GuardedAlternative(TextNode::CreateForSurrogatePair(
+ zone, CharacterRange::Singleton(from_l),
+ CharacterRange::Range(from_t, to_t), compiler->read_backward(),
+ on_success, default_flags)));
+ } else {
+ if (from_t != kTrailSurrogateStart) {
+ // Add [from_l][from_t-\udfff]
+ result->AddAlternative(
+ GuardedAlternative(TextNode::CreateForSurrogatePair(
+ zone, CharacterRange::Singleton(from_l),
+ CharacterRange::Range(from_t, kTrailSurrogateEnd),
+ compiler->read_backward(), on_success, default_flags)));
+ from_l++;
+ }
+ if (to_t != kTrailSurrogateEnd) {
+ // Add [to_l][\udc00-to_t]
+ result->AddAlternative(
+ GuardedAlternative(TextNode::CreateForSurrogatePair(
+ zone, CharacterRange::Singleton(to_l),
+ CharacterRange::Range(kTrailSurrogateStart, to_t),
+ compiler->read_backward(), on_success, default_flags)));
+ to_l--;
+ }
+ if (from_l <= to_l) {
+ // Add [from_l-to_l][\udc00-\udfff]
+ result->AddAlternative(
+ GuardedAlternative(TextNode::CreateForSurrogatePair(
+ zone, CharacterRange::Range(from_l, to_l),
+ CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
+ compiler->read_backward(), on_success, default_flags)));
+ }
+ }
+ }
+}
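The decomposition in the comment above follows directly from UTF-16 surrogate encoding. A quick sketch that reproduces the lead/trail surrogates for the [\u{10005}-\u{11005}] example; these are the standard formulas (V8 itself calls unibrow::Utf16::LeadSurrogate/TrailSurrogate for the same values), not V8 code:

#include <cstdio>

// Standard UTF-16 surrogate encoding of a supplementary code point.
constexpr unsigned LeadSurrogate(unsigned cp) {
  return 0xD800 + ((cp - 0x10000) >> 10);
}
constexpr unsigned TrailSurrogate(unsigned cp) {
  return 0xDC00 + ((cp - 0x10000) & 0x3FF);
}

int main() {
  // The example from the comment above: [\u{10005}-\u{11005}].
  constexpr unsigned from = 0x10005, to = 0x11005;
  static_assert(LeadSurrogate(from) == 0xD800, "");
  static_assert(TrailSurrogate(from) == 0xDC05, "");
  static_assert(LeadSurrogate(to) == 0xD804, "");
  static_assert(TrailSurrogate(to) == 0xDC05, "");
  // Hence the three alternatives:
  //   \uD800[\uDC05-\uDFFF] | [\uD801-\uD803][\uDC00-\uDFFF] | \uD804[\uDC00-\uDC05]
  std::printf("%04X %04X .. %04X %04X\n", LeadSurrogate(from),
              TrailSurrogate(from), LeadSurrogate(to), TrailSurrogate(to));
  return 0;
}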
+
+RegExpNode* NegativeLookaroundAgainstReadDirectionAndMatch(
+ RegExpCompiler* compiler, ZoneList<CharacterRange>* lookbehind,
+ ZoneList<CharacterRange>* match, RegExpNode* on_success, bool read_backward,
+ JSRegExp::Flags flags) {
+ Zone* zone = compiler->zone();
+ RegExpNode* match_node = TextNode::CreateForCharacterRanges(
+ zone, match, read_backward, on_success, flags);
+ int stack_register = compiler->UnicodeLookaroundStackRegister();
+ int position_register = compiler->UnicodeLookaroundPositionRegister();
+ RegExpLookaround::Builder lookaround(false, match_node, stack_register,
+ position_register);
+ RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
+ zone, lookbehind, !read_backward, lookaround.on_match_success(), flags);
+ return lookaround.ForMatch(negative_match);
+}
+
+RegExpNode* MatchAndNegativeLookaroundInReadDirection(
+ RegExpCompiler* compiler, ZoneList<CharacterRange>* match,
+ ZoneList<CharacterRange>* lookahead, RegExpNode* on_success,
+ bool read_backward, JSRegExp::Flags flags) {
+ Zone* zone = compiler->zone();
+ int stack_register = compiler->UnicodeLookaroundStackRegister();
+ int position_register = compiler->UnicodeLookaroundPositionRegister();
+ RegExpLookaround::Builder lookaround(false, on_success, stack_register,
+ position_register);
+ RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
+ zone, lookahead, read_backward, lookaround.on_match_success(), flags);
+ return TextNode::CreateForCharacterRanges(
+ zone, match, read_backward, lookaround.ForMatch(negative_match), flags);
+}
+
+void AddLoneLeadSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
+ RegExpNode* on_success,
+ UnicodeRangeSplitter* splitter) {
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ ZoneList<CharacterRange>* lead_surrogates =
+ ToCanonicalZoneList(splitter->lead_surrogates(), compiler->zone());
+ if (lead_surrogates == nullptr) return;
+ Zone* zone = compiler->zone();
+ // E.g. \ud801 becomes \ud801(?![\udc00-\udfff]).
+ ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
+ zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
+
+ RegExpNode* match;
+ if (compiler->read_backward()) {
+ // Reading backward. Assert that reading forward, there is no trail
+ // surrogate, and then backward match the lead surrogate.
+ match = NegativeLookaroundAgainstReadDirectionAndMatch(
+ compiler, trail_surrogates, lead_surrogates, on_success, true,
+ default_flags);
+ } else {
+ // Reading forward. Forward match the lead surrogate and assert that
+ // no trail surrogate follows.
+ match = MatchAndNegativeLookaroundInReadDirection(
+ compiler, lead_surrogates, trail_surrogates, on_success, false,
+ default_flags);
+ }
+ result->AddAlternative(GuardedAlternative(match));
+}
+
+void AddLoneTrailSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
+ RegExpNode* on_success,
+ UnicodeRangeSplitter* splitter) {
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ ZoneList<CharacterRange>* trail_surrogates =
+ ToCanonicalZoneList(splitter->trail_surrogates(), compiler->zone());
+ if (trail_surrogates == nullptr) return;
+ Zone* zone = compiler->zone();
+ // E.g. \udc01 becomes (?<![\ud800-\udbff])\udc01
+ ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
+ zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
+
+ RegExpNode* match;
+ if (compiler->read_backward()) {
+ // Reading backward. Backward match the trail surrogate and assert that no
+ // lead surrogate precedes it.
+ match = MatchAndNegativeLookaroundInReadDirection(
+ compiler, trail_surrogates, lead_surrogates, on_success, true,
+ default_flags);
+ } else {
+ // Reading forward. Assert that reading backward, there is no lead
+ // surrogate, and then forward match the trail surrogate.
+ match = NegativeLookaroundAgainstReadDirectionAndMatch(
+ compiler, lead_surrogates, trail_surrogates, on_success, false,
+ default_flags);
+ }
+ result->AddAlternative(GuardedAlternative(match));
+}
+
+RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ // This implements ES2015 21.2.5.2.3, AdvanceStringIndex.
+ DCHECK(!compiler->read_backward());
+ Zone* zone = compiler->zone();
+ // Advance any character. If the character happens to be a lead surrogate and
+ // we advanced into the middle of a surrogate pair, it will work out, as
+ // nothing will match from there. We will have to advance again, consuming
+ // the associated trail surrogate.
+ ZoneList<CharacterRange>* range = CharacterRange::List(
+ zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ return TextNode::CreateForCharacterRanges(zone, range, false, on_success,
+ default_flags);
+}
+
+void AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges, Zone* zone) {
+#ifdef V8_INTL_SUPPORT
+ DCHECK(CharacterRange::IsCanonical(ranges));
+
+ // Micro-optimization to avoid passing large ranges to UnicodeSet::closeOver.
+ // See also https://crbug.com/v8/6727.
+ // TODO(jgruber): This only covers the special case of the {0,0x10FFFF} range,
+ // which we use frequently internally. But large ranges can also easily be
+ // created by the user. We might want to have a more general caching mechanism
+ // for such ranges.
+ if (ranges->length() == 1 && ranges->at(0).IsEverything(kNonBmpEnd)) return;
+
+ // Use ICU to compute the case fold closure over the ranges.
+ icu::UnicodeSet set;
+ for (int i = 0; i < ranges->length(); i++) {
+ set.add(ranges->at(i).from(), ranges->at(i).to());
+ }
+ ranges->Clear();
+ set.closeOver(USET_CASE_INSENSITIVE);
+  // Full case mappings map single characters to multiple characters.
+ // Those are represented as strings in the set. Remove them so that
+ // we end up with only simple and common case mappings.
+ set.removeAllStrings();
+ for (int i = 0; i < set.getRangeCount(); i++) {
+ ranges->Add(CharacterRange::Range(set.getRangeStart(i), set.getRangeEnd(i)),
+ zone);
+ }
+  // No errors, and everything we collected ended up as ranges.
+ CharacterRange::Canonicalize(ranges);
+#endif // V8_INTL_SUPPORT
+}
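As a rough standalone illustration of the closure step above (assuming ICU headers and libraries are available; these are the same UnicodeSet calls the function makes): closing {k} over case insensitivity should also pull in 'K' and U+212A KELVIN SIGN, while removeAllStrings() is there to drop multi-character full case mappings, such as the strings that characters like 'ß' would otherwise contribute.

#include <cstdio>

#include "unicode/uniset.h"
#include "unicode/utypes.h"

int main() {
  icu::UnicodeSet set;
  set.add('k', 'k');
  set.closeOver(USET_CASE_INSENSITIVE);
  set.removeAllStrings();  // Drop full case mappings represented as strings.
  for (int32_t i = 0; i < set.getRangeCount(); i++) {
    std::printf("U+%04X..U+%04X\n",
                static_cast<unsigned>(set.getRangeStart(i)),
                static_cast<unsigned>(set.getRangeEnd(i)));
  }
  return 0;
}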
+
+} // namespace
+
+RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ set_.Canonicalize();
+ Zone* zone = compiler->zone();
+ ZoneList<CharacterRange>* ranges = this->ranges(zone);
+ if (NeedsUnicodeCaseEquivalents(flags_)) {
+ AddUnicodeCaseEquivalents(ranges, zone);
+ }
+ if (IsUnicode(flags_) && !compiler->one_byte() &&
+ !contains_split_surrogate()) {
+ if (is_negated()) {
+ ZoneList<CharacterRange>* negated =
+ new (zone) ZoneList<CharacterRange>(2, zone);
+ CharacterRange::Negate(ranges, negated, zone);
+ ranges = negated;
+ }
+ if (ranges->length() == 0) {
+ JSRegExp::Flags default_flags;
+ RegExpCharacterClass* fail =
+ new (zone) RegExpCharacterClass(zone, ranges, default_flags);
+ return new (zone) TextNode(fail, compiler->read_backward(), on_success);
+ }
+ if (standard_type() == '*') {
+ return UnanchoredAdvance(compiler, on_success);
+ } else {
+ ChoiceNode* result = new (zone) ChoiceNode(2, zone);
+ UnicodeRangeSplitter splitter(ranges);
+ AddBmpCharacters(compiler, result, on_success, &splitter);
+ AddNonBmpSurrogatePairs(compiler, result, on_success, &splitter);
+ AddLoneLeadSurrogates(compiler, result, on_success, &splitter);
+ AddLoneTrailSurrogates(compiler, result, on_success, &splitter);
+ return result;
+ }
+ } else {
+ return new (zone) TextNode(this, compiler->read_backward(), on_success);
+ }
+}
+
+int CompareFirstChar(RegExpTree* const* a, RegExpTree* const* b) {
+ RegExpAtom* atom1 = (*a)->AsAtom();
+ RegExpAtom* atom2 = (*b)->AsAtom();
+ uc16 character1 = atom1->data().at(0);
+ uc16 character2 = atom2->data().at(0);
+ if (character1 < character2) return -1;
+ if (character1 > character2) return 1;
+ return 0;
+}
+
+#ifdef V8_INTL_SUPPORT
+
+// Case-insensitive comparison.
+int CompareFirstCharCaseInsensitve(RegExpTree* const* a, RegExpTree* const* b) {
+ RegExpAtom* atom1 = (*a)->AsAtom();
+ RegExpAtom* atom2 = (*b)->AsAtom();
+ icu::UnicodeString character1(atom1->data().at(0));
+ return character1.caseCompare(atom2->data().at(0), U_FOLD_CASE_DEFAULT);
+}
+
+#else
+
+static unibrow::uchar Canonical(
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
+ unibrow::uchar c) {
+ unibrow::uchar chars[unibrow::Ecma262Canonicalize::kMaxWidth];
+ int length = canonicalize->get(c, '\0', chars);
+ DCHECK_LE(length, 1);
+ unibrow::uchar canonical = c;
+ if (length == 1) canonical = chars[0];
+ return canonical;
+}
+
+int CompareFirstCharCaseIndependent(
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
+ RegExpTree* const* a, RegExpTree* const* b) {
+ RegExpAtom* atom1 = (*a)->AsAtom();
+ RegExpAtom* atom2 = (*b)->AsAtom();
+ unibrow::uchar character1 = atom1->data().at(0);
+ unibrow::uchar character2 = atom2->data().at(0);
+ if (character1 == character2) return 0;
+ if (character1 >= 'a' || character2 >= 'a') {
+ character1 = Canonical(canonicalize, character1);
+ character2 = Canonical(canonicalize, character2);
+ }
+ return static_cast<int>(character1) - static_cast<int>(character2);
+}
+#endif // V8_INTL_SUPPORT
+
+// We can stable sort runs of atoms, since the order does not matter if they
+// start with different characters.
+// Returns true if any consecutive atoms were found.
+bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ int length = alternatives->length();
+ bool found_consecutive_atoms = false;
+ for (int i = 0; i < length; i++) {
+ while (i < length) {
+ RegExpTree* alternative = alternatives->at(i);
+ if (alternative->IsAtom()) break;
+ i++;
+ }
+ // i is length or it is the index of an atom.
+ if (i == length) break;
+ int first_atom = i;
+ JSRegExp::Flags flags = alternatives->at(i)->AsAtom()->flags();
+ i++;
+ while (i < length) {
+ RegExpTree* alternative = alternatives->at(i);
+ if (!alternative->IsAtom()) break;
+ if (alternative->AsAtom()->flags() != flags) break;
+ i++;
+ }
+ // Sort atoms to get ones with common prefixes together.
+ // This step is more tricky if we are in a case-independent regexp,
+ // because it would change /is|I/ to /I|is/, and order matters when
+ // the regexp parts don't match only disjoint starting points. To fix
+ // this we have a version of CompareFirstChar that uses case-
+ // independent character classes for comparison.
+ DCHECK_LT(first_atom, alternatives->length());
+ DCHECK_LE(i, alternatives->length());
+ DCHECK_LE(first_atom, i);
+ if (IgnoreCase(flags)) {
+#ifdef V8_INTL_SUPPORT
+ alternatives->StableSort(CompareFirstCharCaseInsensitve, first_atom,
+ i - first_atom);
+#else
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+ compiler->isolate()->regexp_macro_assembler_canonicalize();
+ auto compare_closure = [canonicalize](RegExpTree* const* a,
+ RegExpTree* const* b) {
+ return CompareFirstCharCaseIndependent(canonicalize, a, b);
+ };
+ alternatives->StableSort(compare_closure, first_atom, i - first_atom);
+#endif // V8_INTL_SUPPORT
+ } else {
+ alternatives->StableSort(CompareFirstChar, first_atom, i - first_atom);
+ }
+ if (i - first_atom > 1) found_consecutive_atoms = true;
+ }
+ return found_consecutive_atoms;
+}
+
+// Optimizes ab|ac|az to a(?:b|c|z).
+void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
+ Zone* zone = compiler->zone();
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ int length = alternatives->length();
+
+ int write_posn = 0;
+ int i = 0;
+ while (i < length) {
+ RegExpTree* alternative = alternatives->at(i);
+ if (!alternative->IsAtom()) {
+ alternatives->at(write_posn++) = alternatives->at(i);
+ i++;
+ continue;
+ }
+ RegExpAtom* const atom = alternative->AsAtom();
+ JSRegExp::Flags flags = atom->flags();
+#ifdef V8_INTL_SUPPORT
+ icu::UnicodeString common_prefix(atom->data().at(0));
+#else
+ unibrow::uchar common_prefix = atom->data().at(0);
+#endif // V8_INTL_SUPPORT
+ int first_with_prefix = i;
+ int prefix_length = atom->length();
+ i++;
+ while (i < length) {
+ alternative = alternatives->at(i);
+ if (!alternative->IsAtom()) break;
+ RegExpAtom* const atom = alternative->AsAtom();
+ if (atom->flags() != flags) break;
+#ifdef V8_INTL_SUPPORT
+ icu::UnicodeString new_prefix(atom->data().at(0));
+ if (new_prefix != common_prefix) {
+ if (!IgnoreCase(flags)) break;
+ if (common_prefix.caseCompare(new_prefix, U_FOLD_CASE_DEFAULT) != 0)
+ break;
+ }
+#else
+ unibrow::uchar new_prefix = atom->data().at(0);
+ if (new_prefix != common_prefix) {
+ if (!IgnoreCase(flags)) break;
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+ compiler->isolate()->regexp_macro_assembler_canonicalize();
+ new_prefix = Canonical(canonicalize, new_prefix);
+ common_prefix = Canonical(canonicalize, common_prefix);
+ if (new_prefix != common_prefix) break;
+ }
+#endif // V8_INTL_SUPPORT
+ prefix_length = Min(prefix_length, atom->length());
+ i++;
+ }
+ if (i > first_with_prefix + 2) {
+ // Found worthwhile run of alternatives with common prefix of at least one
+ // character. The sorting function above did not sort on more than one
+ // character for reasons of correctness, but there may still be a longer
+ // common prefix if the terms were similar or presorted in the input.
+ // Find out how long the common prefix is.
+ int run_length = i - first_with_prefix;
+ RegExpAtom* const atom = alternatives->at(first_with_prefix)->AsAtom();
+ for (int j = 1; j < run_length && prefix_length > 1; j++) {
+ RegExpAtom* old_atom =
+ alternatives->at(j + first_with_prefix)->AsAtom();
+ for (int k = 1; k < prefix_length; k++) {
+ if (atom->data().at(k) != old_atom->data().at(k)) {
+ prefix_length = k;
+ break;
+ }
+ }
+ }
+ RegExpAtom* prefix = new (zone)
+ RegExpAtom(atom->data().SubVector(0, prefix_length), flags);
+ ZoneList<RegExpTree*>* pair = new (zone) ZoneList<RegExpTree*>(2, zone);
+ pair->Add(prefix, zone);
+ ZoneList<RegExpTree*>* suffixes =
+ new (zone) ZoneList<RegExpTree*>(run_length, zone);
+ for (int j = 0; j < run_length; j++) {
+ RegExpAtom* old_atom =
+ alternatives->at(j + first_with_prefix)->AsAtom();
+ int len = old_atom->length();
+ if (len == prefix_length) {
+ suffixes->Add(new (zone) RegExpEmpty(), zone);
+ } else {
+ RegExpTree* suffix = new (zone) RegExpAtom(
+ old_atom->data().SubVector(prefix_length, old_atom->length()),
+ flags);
+ suffixes->Add(suffix, zone);
+ }
+ }
+ pair->Add(new (zone) RegExpDisjunction(suffixes), zone);
+ alternatives->at(write_posn++) = new (zone) RegExpAlternative(pair);
+ } else {
+ // Just copy any non-worthwhile alternatives.
+ for (int j = first_with_prefix; j < i; j++) {
+ alternatives->at(write_posn++) = alternatives->at(j);
+ }
+ }
+ }
+ alternatives->Rewind(write_posn); // Trim end of array.
+}
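RationalizeConsecutiveAtoms() walks each sorted run of at least three atoms that agree on their first character, shrinks prefix_length to the longest prefix they all share, and rewrites the run as prefix + disjunction-of-suffixes. A simplified sketch of that factoring over plain strings (illustration only; it ignores flags, case folding, and the empty-suffix case V8 models with RegExpEmpty):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

// Factor a run of alternatives that already share their first character by
// their longest common prefix: {"ab", "ac", "az"} -> "a(?:b|c|z)".
std::string FactorRun(const std::vector<std::string>& run) {
  std::size_t prefix_length = run[0].size();
  for (std::size_t j = 1; j < run.size(); j++) {
    prefix_length = std::min(prefix_length, run[j].size());
    for (std::size_t k = 1; k < prefix_length; k++) {
      if (run[j][k] != run[0][k]) {
        prefix_length = k;
        break;
      }
    }
  }
  std::string out = run[0].substr(0, prefix_length) + "(?:";
  for (std::size_t j = 0; j < run.size(); j++) {
    if (j > 0) out += "|";
    out += run[j].substr(prefix_length);
  }
  return out + ")";
}

int main() {
  std::printf("%s\n", FactorRun({"ab", "ac", "az"}).c_str());  // a(?:b|c|z)
  return 0;
}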
+
+// Optimizes b|c|z to [bcz].
+void RegExpDisjunction::FixSingleCharacterDisjunctions(
+ RegExpCompiler* compiler) {
+ Zone* zone = compiler->zone();
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ int length = alternatives->length();
+
+ int write_posn = 0;
+ int i = 0;
+ while (i < length) {
+ RegExpTree* alternative = alternatives->at(i);
+ if (!alternative->IsAtom()) {
+ alternatives->at(write_posn++) = alternatives->at(i);
+ i++;
+ continue;
+ }
+ RegExpAtom* const atom = alternative->AsAtom();
+ if (atom->length() != 1) {
+ alternatives->at(write_posn++) = alternatives->at(i);
+ i++;
+ continue;
+ }
+ JSRegExp::Flags flags = atom->flags();
+ DCHECK_IMPLIES(IsUnicode(flags),
+ !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
+ bool contains_trail_surrogate =
+ unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
+ int first_in_run = i;
+ i++;
+ // Find a run of single-character atom alternatives that have identical
+ // flags (case independence and unicode-ness).
+ while (i < length) {
+ alternative = alternatives->at(i);
+ if (!alternative->IsAtom()) break;
+ RegExpAtom* const atom = alternative->AsAtom();
+ if (atom->length() != 1) break;
+ if (atom->flags() != flags) break;
+ DCHECK_IMPLIES(IsUnicode(flags),
+ !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
+ contains_trail_surrogate |=
+ unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
+ i++;
+ }
+ if (i > first_in_run + 1) {
+ // Found non-trivial run of single-character alternatives.
+ int run_length = i - first_in_run;
+ ZoneList<CharacterRange>* ranges =
+ new (zone) ZoneList<CharacterRange>(2, zone);
+ for (int j = 0; j < run_length; j++) {
+ RegExpAtom* old_atom = alternatives->at(j + first_in_run)->AsAtom();
+ DCHECK_EQ(old_atom->length(), 1);
+ ranges->Add(CharacterRange::Singleton(old_atom->data().at(0)), zone);
+ }
+ RegExpCharacterClass::CharacterClassFlags character_class_flags;
+ if (IsUnicode(flags) && contains_trail_surrogate) {
+ character_class_flags = RegExpCharacterClass::CONTAINS_SPLIT_SURROGATE;
+ }
+ alternatives->at(write_posn++) = new (zone)
+ RegExpCharacterClass(zone, ranges, flags, character_class_flags);
+ } else {
+ // Just copy any trivial alternatives.
+ for (int j = first_in_run; j < i; j++) {
+ alternatives->at(write_posn++) = alternatives->at(j);
+ }
+ }
+ }
+ alternatives->Rewind(write_posn); // Trim end of array.
+}
+
+RegExpNode* RegExpDisjunction::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+
+ if (alternatives->length() > 2) {
+ bool found_consecutive_atoms = SortConsecutiveAtoms(compiler);
+ if (found_consecutive_atoms) RationalizeConsecutiveAtoms(compiler);
+ FixSingleCharacterDisjunctions(compiler);
+ if (alternatives->length() == 1) {
+ return alternatives->at(0)->ToNode(compiler, on_success);
+ }
+ }
+
+ int length = alternatives->length();
+
+ ChoiceNode* result =
+ new (compiler->zone()) ChoiceNode(length, compiler->zone());
+ for (int i = 0; i < length; i++) {
+ GuardedAlternative alternative(
+ alternatives->at(i)->ToNode(compiler, on_success));
+ result->AddAlternative(alternative);
+ }
+ return result;
+}
+
+RegExpNode* RegExpQuantifier::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ return ToNode(min(), max(), is_greedy(), body(), compiler, on_success);
+}
+
+namespace {
+// Desugar \b to (?<=\w)(?=\W)|(?<=\W)(?=\w) and
+// \B to (?<=\w)(?=\w)|(?<=\W)(?=\W)
+RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
+ RegExpNode* on_success,
+ RegExpAssertion::AssertionType type,
+ JSRegExp::Flags flags) {
+ DCHECK(NeedsUnicodeCaseEquivalents(flags));
+ Zone* zone = compiler->zone();
+ ZoneList<CharacterRange>* word_range =
+ new (zone) ZoneList<CharacterRange>(2, zone);
+ CharacterRange::AddClassEscape('w', word_range, true, zone);
+ int stack_register = compiler->UnicodeLookaroundStackRegister();
+ int position_register = compiler->UnicodeLookaroundPositionRegister();
+ ChoiceNode* result = new (zone) ChoiceNode(2, zone);
+ // Add two choices. The (non-)boundary could start with a word or
+ // a non-word-character.
+ for (int i = 0; i < 2; i++) {
+ bool lookbehind_for_word = i == 0;
+ bool lookahead_for_word =
+ (type == RegExpAssertion::BOUNDARY) ^ lookbehind_for_word;
+ // Look to the left.
+ RegExpLookaround::Builder lookbehind(lookbehind_for_word, on_success,
+ stack_register, position_register);
+ RegExpNode* backward = TextNode::CreateForCharacterRanges(
+ zone, word_range, true, lookbehind.on_match_success(), flags);
+ // Look to the right.
+ RegExpLookaround::Builder lookahead(lookahead_for_word,
+ lookbehind.ForMatch(backward),
+ stack_register, position_register);
+ RegExpNode* forward = TextNode::CreateForCharacterRanges(
+ zone, word_range, false, lookahead.on_match_success(), flags);
+ result->AddAlternative(GuardedAlternative(lookahead.ForMatch(forward)));
+ }
+ return result;
+}
+} // anonymous namespace
+
+RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ NodeInfo info;
+ Zone* zone = compiler->zone();
+
+ switch (assertion_type()) {
+ case START_OF_LINE:
+ return AssertionNode::AfterNewline(on_success);
+ case START_OF_INPUT:
+ return AssertionNode::AtStart(on_success);
+ case BOUNDARY:
+ return NeedsUnicodeCaseEquivalents(flags_)
+ ? BoundaryAssertionAsLookaround(compiler, on_success, BOUNDARY,
+ flags_)
+ : AssertionNode::AtBoundary(on_success);
+ case NON_BOUNDARY:
+ return NeedsUnicodeCaseEquivalents(flags_)
+ ? BoundaryAssertionAsLookaround(compiler, on_success,
+ NON_BOUNDARY, flags_)
+ : AssertionNode::AtNonBoundary(on_success);
+ case END_OF_INPUT:
+ return AssertionNode::AtEnd(on_success);
+ case END_OF_LINE: {
+ // Compile $ in multiline regexps as an alternation with a positive
+      // lookahead on one side and an end-of-input on the other side.
+ // We need two registers for the lookahead.
+ int stack_pointer_register = compiler->AllocateRegister();
+ int position_register = compiler->AllocateRegister();
+ // The ChoiceNode to distinguish between a newline and end-of-input.
+ ChoiceNode* result = new (zone) ChoiceNode(2, zone);
+ // Create a newline atom.
+ ZoneList<CharacterRange>* newline_ranges =
+ new (zone) ZoneList<CharacterRange>(3, zone);
+ CharacterRange::AddClassEscape('n', newline_ranges, false, zone);
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ RegExpCharacterClass* newline_atom =
+ new (zone) RegExpCharacterClass('n', default_flags);
+ TextNode* newline_matcher =
+ new (zone) TextNode(newline_atom, false,
+ ActionNode::PositiveSubmatchSuccess(
+ stack_pointer_register, position_register,
+ 0, // No captures inside.
+ -1, // Ignored if no captures.
+ on_success));
+ // Create an end-of-input matcher.
+ RegExpNode* end_of_line = ActionNode::BeginSubmatch(
+ stack_pointer_register, position_register, newline_matcher);
+ // Add the two alternatives to the ChoiceNode.
+ GuardedAlternative eol_alternative(end_of_line);
+ result->AddAlternative(eol_alternative);
+ GuardedAlternative end_alternative(AssertionNode::AtEnd(on_success));
+ result->AddAlternative(end_alternative);
+ return result;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return on_success;
+}
+
+RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ return new (compiler->zone())
+ BackReferenceNode(RegExpCapture::StartRegister(index()),
+ RegExpCapture::EndRegister(index()), flags_,
+ compiler->read_backward(), on_success);
+}
+
+RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ return on_success;
+}
+
+RegExpLookaround::Builder::Builder(bool is_positive, RegExpNode* on_success,
+ int stack_pointer_register,
+ int position_register,
+ int capture_register_count,
+ int capture_register_start)
+ : is_positive_(is_positive),
+ on_success_(on_success),
+ stack_pointer_register_(stack_pointer_register),
+ position_register_(position_register) {
+ if (is_positive_) {
+ on_match_success_ = ActionNode::PositiveSubmatchSuccess(
+ stack_pointer_register, position_register, capture_register_count,
+ capture_register_start, on_success_);
+ } else {
+ Zone* zone = on_success_->zone();
+ on_match_success_ = new (zone) NegativeSubmatchSuccess(
+ stack_pointer_register, position_register, capture_register_count,
+ capture_register_start, zone);
+ }
+}
+
+RegExpNode* RegExpLookaround::Builder::ForMatch(RegExpNode* match) {
+ if (is_positive_) {
+ return ActionNode::BeginSubmatch(stack_pointer_register_,
+ position_register_, match);
+ } else {
+ Zone* zone = on_success_->zone();
+ // We use a ChoiceNode to represent the negative lookaround. The first
+ // alternative is the negative match. On success, the end node backtracks.
+ // On failure, the second alternative is tried and leads to success.
+ // NegativeLookaheadChoiceNode is a special ChoiceNode that ignores the
+ // first exit when calculating quick checks.
+ ChoiceNode* choice_node = new (zone) NegativeLookaroundChoiceNode(
+ GuardedAlternative(match), GuardedAlternative(on_success_), zone);
+ return ActionNode::BeginSubmatch(stack_pointer_register_,
+ position_register_, choice_node);
+ }
+}
+
+RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ int stack_pointer_register = compiler->AllocateRegister();
+ int position_register = compiler->AllocateRegister();
+
+ const int registers_per_capture = 2;
+ const int register_of_first_capture = 2;
+ int register_count = capture_count_ * registers_per_capture;
+ int register_start =
+ register_of_first_capture + capture_from_ * registers_per_capture;
+
+ RegExpNode* result;
+ bool was_reading_backward = compiler->read_backward();
+ compiler->set_read_backward(type() == LOOKBEHIND);
+ Builder builder(is_positive(), on_success, stack_pointer_register,
+ position_register, register_count, register_start);
+ RegExpNode* match = body_->ToNode(compiler, builder.on_match_success());
+ result = builder.ForMatch(match);
+ compiler->set_read_backward(was_reading_backward);
+ return result;
+}
+
+RegExpNode* RegExpCapture::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ return ToNode(body(), index(), compiler, on_success);
+}
+
+RegExpNode* RegExpCapture::ToNode(RegExpTree* body, int index,
+ RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ DCHECK_NOT_NULL(body);
+ int start_reg = RegExpCapture::StartRegister(index);
+ int end_reg = RegExpCapture::EndRegister(index);
+ if (compiler->read_backward()) std::swap(start_reg, end_reg);
+ RegExpNode* store_end = ActionNode::StorePosition(end_reg, true, on_success);
+ RegExpNode* body_node = body->ToNode(compiler, store_end);
+ return ActionNode::StorePosition(start_reg, true, body_node);
+}
+
+namespace {
+
+class AssertionSequenceRewriter final {
+ public:
+ // TODO(jgruber): Consider moving this to a separate AST tree rewriter pass
+ // instead of sprinkling rewrites into the AST->Node conversion process.
+ static void MaybeRewrite(ZoneList<RegExpTree*>* terms, Zone* zone) {
+ AssertionSequenceRewriter rewriter(terms, zone);
+
+ static constexpr int kNoIndex = -1;
+ int from = kNoIndex;
+
+ for (int i = 0; i < terms->length(); i++) {
+ RegExpTree* t = terms->at(i);
+ if (from == kNoIndex && t->IsAssertion()) {
+ from = i; // Start a sequence.
+ } else if (from != kNoIndex && !t->IsAssertion()) {
+ // Terminate and process the sequence.
+ if (i - from > 1) rewriter.Rewrite(from, i);
+ from = kNoIndex;
+ }
+ }
+
+ if (from != kNoIndex && terms->length() - from > 1) {
+ rewriter.Rewrite(from, terms->length());
+ }
+ }
+
+ // All assertions are zero width. A consecutive sequence of assertions is
+  // order-independent. There are two ways we can optimize here:
+ // 1. fold all identical assertions.
+ // 2. if any assertion combinations are known to fail (e.g. \b\B), the entire
+ // sequence fails.
+ void Rewrite(int from, int to) {
+ DCHECK_GT(to, from + 1);
+
+ // Bitfield of all seen assertions.
+ uint32_t seen_assertions = 0;
+ STATIC_ASSERT(RegExpAssertion::LAST_TYPE < kUInt32Size * kBitsPerByte);
+
+ // Flags must match for folding.
+ JSRegExp::Flags flags = terms_->at(from)->AsAssertion()->flags();
+ bool saw_mismatched_flags = false;
+
+ for (int i = from; i < to; i++) {
+ RegExpAssertion* t = terms_->at(i)->AsAssertion();
+ if (t->flags() != flags) saw_mismatched_flags = true;
+ const uint32_t bit = 1 << t->assertion_type();
+
+ if ((seen_assertions & bit) && !saw_mismatched_flags) {
+ // Fold duplicates.
+ terms_->Set(i, new (zone_) RegExpEmpty());
+ }
+
+ seen_assertions |= bit;
+ }
+
+ // Collapse failures.
+ const uint32_t always_fails_mask =
+ 1 << RegExpAssertion::BOUNDARY | 1 << RegExpAssertion::NON_BOUNDARY;
+ if ((seen_assertions & always_fails_mask) == always_fails_mask) {
+ ReplaceSequenceWithFailure(from, to);
+ }
+ }
+
+ void ReplaceSequenceWithFailure(int from, int to) {
+ // Replace the entire sequence with a single node that always fails.
+ // TODO(jgruber): Consider adding an explicit Fail kind. Until then, the
+ // negated '*' (everything) range serves the purpose.
+ ZoneList<CharacterRange>* ranges =
+ new (zone_) ZoneList<CharacterRange>(0, zone_);
+ RegExpCharacterClass* cc =
+ new (zone_) RegExpCharacterClass(zone_, ranges, JSRegExp::Flags());
+ terms_->Set(from, cc);
+
+ // Zero out the rest.
+ RegExpEmpty* empty = new (zone_) RegExpEmpty();
+ for (int i = from + 1; i < to; i++) terms_->Set(i, empty);
+ }
+
+ private:
+ AssertionSequenceRewriter(ZoneList<RegExpTree*>* terms, Zone* zone)
+ : zone_(zone), terms_(terms) {}
+
+ Zone* zone_;
+ ZoneList<RegExpTree*>* terms_;
+};
+
+} // namespace
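+
+// Exposition-only sketch (not part of this change): it restates the core of
+// AssertionSequenceRewriter::Rewrite over a plain array of assertion kinds.
+// Within one run of zero-width assertions duplicates can be folded, and
+// seeing both BOUNDARY and NON_BOUNDARY means the run can never match.
+#if 0  // Exposition only.
+enum SketchAssertionType { kBoundary = 0, kNonBoundary = 1, kStartOfLine = 2 };
+static bool AssertionRunAlwaysFails(const SketchAssertionType* run, int len) {
+  uint32_t seen = 0;
+  for (int i = 0; i < len; i++) seen |= 1u << run[i];
+  const uint32_t kFailMask = (1u << kBoundary) | (1u << kNonBoundary);
+  return (seen & kFailMask) == kFailMask;  // e.g. /\b\B/ can never match.
+}
+#endif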
+
+RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ ZoneList<RegExpTree*>* children = nodes();
+
+ AssertionSequenceRewriter::MaybeRewrite(children, compiler->zone());
+
+ RegExpNode* current = on_success;
+ if (compiler->read_backward()) {
+ for (int i = 0; i < children->length(); i++) {
+ current = children->at(i)->ToNode(compiler, current);
+ }
+ } else {
+ for (int i = children->length() - 1; i >= 0; i--) {
+ current = children->at(i)->ToNode(compiler, current);
+ }
+ }
+ return current;
+}
+
+static void AddClass(const int* elmv, int elmc,
+ ZoneList<CharacterRange>* ranges, Zone* zone) {
+ elmc--;
+ DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
+ for (int i = 0; i < elmc; i += 2) {
+ DCHECK(elmv[i] < elmv[i + 1]);
+ ranges->Add(CharacterRange::Range(elmv[i], elmv[i + 1] - 1), zone);
+ }
+}
+
+static void AddClassNegated(const int* elmv, int elmc,
+ ZoneList<CharacterRange>* ranges, Zone* zone) {
+ elmc--;
+ DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
+ DCHECK_NE(0x0000, elmv[0]);
+ DCHECK_NE(String::kMaxCodePoint, elmv[elmc - 1]);
+ uc16 last = 0x0000;
+ for (int i = 0; i < elmc; i += 2) {
+ DCHECK(last <= elmv[i] - 1);
+ DCHECK(elmv[i] < elmv[i + 1]);
+ ranges->Add(CharacterRange::Range(last, elmv[i] - 1), zone);
+ last = elmv[i + 1];
+ }
+ ranges->Add(CharacterRange::Range(last, String::kMaxCodePoint), zone);
+}
+
+void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
+ bool add_unicode_case_equivalents,
+ Zone* zone) {
+ if (add_unicode_case_equivalents && (type == 'w' || type == 'W')) {
+ // See #sec-runtime-semantics-wordcharacters-abstract-operation
+ // In case of unicode and ignore_case, we need to create the closure over
+ // case equivalent characters before negating.
+ ZoneList<CharacterRange>* new_ranges =
+ new (zone) ZoneList<CharacterRange>(2, zone);
+ AddClass(kWordRanges, kWordRangeCount, new_ranges, zone);
+ AddUnicodeCaseEquivalents(new_ranges, zone);
+ if (type == 'W') {
+ ZoneList<CharacterRange>* negated =
+ new (zone) ZoneList<CharacterRange>(2, zone);
+ CharacterRange::Negate(new_ranges, negated, zone);
+ new_ranges = negated;
+ }
+ ranges->AddAll(*new_ranges, zone);
+ return;
+ }
+ AddClassEscape(type, ranges, zone);
+}
+
+void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
+ Zone* zone) {
+ switch (type) {
+ case 's':
+ AddClass(kSpaceRanges, kSpaceRangeCount, ranges, zone);
+ break;
+ case 'S':
+ AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges, zone);
+ break;
+ case 'w':
+ AddClass(kWordRanges, kWordRangeCount, ranges, zone);
+ break;
+ case 'W':
+ AddClassNegated(kWordRanges, kWordRangeCount, ranges, zone);
+ break;
+ case 'd':
+ AddClass(kDigitRanges, kDigitRangeCount, ranges, zone);
+ break;
+ case 'D':
+ AddClassNegated(kDigitRanges, kDigitRangeCount, ranges, zone);
+ break;
+ case '.':
+ AddClassNegated(kLineTerminatorRanges, kLineTerminatorRangeCount, ranges,
+ zone);
+ break;
+ // This is not a character range as defined by the spec but a
+ // convenient shorthand for a character class that matches any
+ // character.
+ case '*':
+ ranges->Add(CharacterRange::Everything(), zone);
+ break;
+ // This is the set of characters matched by the $ and ^ symbols
+ // in multiline mode.
+ case 'n':
+ AddClass(kLineTerminatorRanges, kLineTerminatorRangeCount, ranges, zone);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+Vector<const int> CharacterRange::GetWordBounds() {
+ return Vector<const int>(kWordRanges, kWordRangeCount - 1);
+}
+
+#ifdef V8_INTL_SUPPORT
+struct IgnoreSet {
+ IgnoreSet() : set(BuildIgnoreSet()) {}
+ const icu::UnicodeSet set;
+};
+
+struct SpecialAddSet {
+ SpecialAddSet() : set(BuildSpecialAddSet()) {}
+ const icu::UnicodeSet set;
+};
+
+icu::UnicodeSet BuildAsciiAToZSet() {
+ icu::UnicodeSet set('a', 'z');
+ set.add('A', 'Z');
+ set.freeze();
+ return set;
+}
+
+struct AsciiAToZSet {
+ AsciiAToZSet() : set(BuildAsciiAToZSet()) {}
+ const icu::UnicodeSet set;
+};
+
+static base::LazyInstance<IgnoreSet>::type ignore_set =
+ LAZY_INSTANCE_INITIALIZER;
+
+static base::LazyInstance<SpecialAddSet>::type special_add_set =
+ LAZY_INSTANCE_INITIALIZER;
+
+static base::LazyInstance<AsciiAToZSet>::type ascii_a_to_z_set =
+ LAZY_INSTANCE_INITIALIZER;
+#endif // V8_INTL_SUPPORT
+
+// static
+void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
+ ZoneList<CharacterRange>* ranges,
+ bool is_one_byte) {
+ CharacterRange::Canonicalize(ranges);
+ int range_count = ranges->length();
+#ifdef V8_INTL_SUPPORT
+ icu::UnicodeSet others;
+ for (int i = 0; i < range_count; i++) {
+ CharacterRange range = ranges->at(i);
+ uc32 from = range.from();
+ if (from > String::kMaxUtf16CodeUnit) continue;
+ uc32 to = Min(range.to(), String::kMaxUtf16CodeUnit);
+ // Nothing to be done for surrogates.
+ if (from >= kLeadSurrogateStart && to <= kTrailSurrogateEnd) continue;
+ if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
+ if (from > String::kMaxOneByteCharCode) continue;
+ if (to > String::kMaxOneByteCharCode) to = String::kMaxOneByteCharCode;
+ }
+ others.add(from, to);
+ }
+
+ // Set of characters already added to ranges that do not need to be added
+ // again.
+ icu::UnicodeSet already_added(others);
+
+ // Set of characters in ranges that are in the 52 ASCII characters [a-zA-Z].
+ icu::UnicodeSet in_ascii_a_to_z(others);
+ in_ascii_a_to_z.retainAll(ascii_a_to_z_set.Pointer()->set);
+
+ // Remove all chars in [a-zA-Z] from others.
+ others.removeAll(in_ascii_a_to_z);
+
+ // Set of characters in ranges that are overlapping with special add set.
+ icu::UnicodeSet in_special_add(others);
+ in_special_add.retainAll(special_add_set.Pointer()->set);
+
+ others.removeAll(in_special_add);
+
+ // Ignore all chars in ignore set.
+ others.removeAll(ignore_set.Pointer()->set);
+
+  // For most of the chars in ranges that are still in others, find the case-
+  // equivalent set by calling closeOver(USET_CASE_INSENSITIVE).
+ others.closeOver(USET_CASE_INSENSITIVE);
+
+ // Because closeOver(USET_CASE_INSENSITIVE) may add ASCII [a-zA-Z] to others,
+ // but ECMA262 "i" mode won't consider that, remove them from others.
+  // Ex: U+017F adds 'S' and 's' to others.
+ others.removeAll(ascii_a_to_z_set.Pointer()->set);
+
+ // Special handling for in_ascii_a_to_z.
+ for (int32_t i = 0; i < in_ascii_a_to_z.getRangeCount(); i++) {
+ UChar32 start = in_ascii_a_to_z.getRangeStart(i);
+ UChar32 end = in_ascii_a_to_z.getRangeEnd(i);
+    // Bit 0x20 distinguishes the case: it is set for lowercase a-z and clear
+    // for uppercase A-Z.
+    if (start & 0x0020) {
+      // Lowercase range; add the uppercase equivalents.
+      others.add(start & 0x005F, end & 0x005F);
+    } else {
+      // Uppercase range; add the lowercase equivalents.
+ others.add(start | 0x0020, end | 0x0020);
+ }
+ }
+
+ // Special handling for chars in "Special Add" set.
+ for (int32_t i = 0; i < in_special_add.getRangeCount(); i++) {
+ UChar32 end = in_special_add.getRangeEnd(i);
+ for (UChar32 ch = in_special_add.getRangeStart(i); ch <= end; ch++) {
+      // Add the uppercase of this character if it is not itself an uppercase
+      // character.
+      // Note: the condition cannot be u_islower(ch) because ch could be
+      // neither uppercase nor lowercase but e.g. a combining mark (Mn).
+ if (!u_isupper(ch)) {
+ others.add(u_toupper(ch));
+ }
+ icu::UnicodeSet candidates(ch, ch);
+ candidates.closeOver(USET_CASE_INSENSITIVE);
+ for (int32_t j = 0; j < candidates.getRangeCount(); j++) {
+ UChar32 end2 = candidates.getRangeEnd(j);
+ for (UChar32 ch2 = candidates.getRangeStart(j); ch2 <= end2; ch2++) {
+          // Add any character that is not uppercase to others.
+ if (!u_isupper(ch2)) {
+ others.add(ch2);
+ }
+ }
+ }
+ }
+ }
+
+  // Remove all characters which are already in the ranges.
+ others.removeAll(already_added);
+
+ // Add others to the ranges
+ for (int32_t i = 0; i < others.getRangeCount(); i++) {
+ UChar32 from = others.getRangeStart(i);
+ UChar32 to = others.getRangeEnd(i);
+ if (from == to) {
+ ranges->Add(CharacterRange::Singleton(from), zone);
+ } else {
+ ranges->Add(CharacterRange::Range(from, to), zone);
+ }
+ }
+#else
+ for (int i = 0; i < range_count; i++) {
+ CharacterRange range = ranges->at(i);
+ uc32 bottom = range.from();
+ if (bottom > String::kMaxUtf16CodeUnit) continue;
+ uc32 top = Min(range.to(), String::kMaxUtf16CodeUnit);
+ // Nothing to be done for surrogates.
+ if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) continue;
+ if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
+ if (bottom > String::kMaxOneByteCharCode) continue;
+ if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
+ }
+ unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+ if (top == bottom) {
+ // If this is a singleton we just expand the one character.
+ int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
+ for (int i = 0; i < length; i++) {
+ uc32 chr = chars[i];
+ if (chr != bottom) {
+ ranges->Add(CharacterRange::Singleton(chars[i]), zone);
+ }
+ }
+ } else {
+ // If this is a range we expand the characters block by block, expanding
+ // contiguous subranges (blocks) one at a time. The approach is as
+ // follows. For a given start character we look up the remainder of the
+ // block that contains it (represented by the end point), for instance we
+ // find 'z' if the character is 'c'. A block is characterized by the
+ // property that all characters uncanonicalize in the same way, except
+ // that each entry in the result is incremented by the distance from the
+ // first element. So a-z is a block because 'a' uncanonicalizes to ['a',
+ // 'A'] and the k'th letter uncanonicalizes to ['a' + k, 'A' + k]. Once
+ // we've found the end point we look up its uncanonicalization and
+ // produce a range for each element. For instance for [c-f] we look up
+ // ['z', 'Z'] and produce [c-f] and [C-F]. We then only add a range if
+ // it is not already contained in the input, so [c-f] will be skipped but
+ // [C-F] will be added. If this range is not completely contained in a
+ // block we do this for all the blocks covered by the range (handling
+      // characters that are not in a block as a "singleton block").
+ unibrow::uchar equivalents[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+ int pos = bottom;
+ while (pos <= top) {
+ int length =
+ isolate->jsregexp_canonrange()->get(pos, '\0', equivalents);
+ uc32 block_end;
+ if (length == 0) {
+ block_end = pos;
+ } else {
+ DCHECK_EQ(1, length);
+ block_end = equivalents[0];
+ }
+ int end = (block_end > top) ? top : block_end;
+ length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0',
+ equivalents);
+ for (int i = 0; i < length; i++) {
+ uc32 c = equivalents[i];
+ uc32 range_from = c - (block_end - pos);
+ uc32 range_to = c - (block_end - end);
+ if (!(bottom <= range_from && range_to <= top)) {
+ ranges->Add(CharacterRange::Range(range_from, range_to), zone);
+ }
+ }
+ pos = end + 1;
+ }
+ }
+ }
+#endif // V8_INTL_SUPPORT
+}
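+
+// Exposition-only sketch (not part of this change), assuming an ICU build
+// (V8_INTL_SUPPORT). It mirrors the U+017F example mentioned above:
+// case-closing LATIN SMALL LETTER LONG S pulls in the ASCII letters 'S' and
+// 's', which ECMA-262 "i" (without "u") canonicalization must not add, hence
+// the removeAll(ascii_a_to_z_set...) step in AddCaseEquivalents.
+#if 0  // Exposition only.
+static bool LongSClosesOverAsciiS() {
+  icu::UnicodeSet set(0x017F, 0x017F);   // { U+017F }
+  set.closeOver(USET_CASE_INSENSITIVE);  // now also contains 'S' and 's'
+  return set.contains(static_cast<UChar32>('S')) &&
+         set.contains(static_cast<UChar32>('s'));
+}
+#endif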
+
+bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
+ DCHECK_NOT_NULL(ranges);
+ int n = ranges->length();
+ if (n <= 1) return true;
+ int max = ranges->at(0).to();
+ for (int i = 1; i < n; i++) {
+ CharacterRange next_range = ranges->at(i);
+ if (next_range.from() <= max + 1) return false;
+ max = next_range.to();
+ }
+ return true;
+}
+
+ZoneList<CharacterRange>* CharacterSet::ranges(Zone* zone) {
+ if (ranges_ == nullptr) {
+ ranges_ = new (zone) ZoneList<CharacterRange>(2, zone);
+ CharacterRange::AddClassEscape(standard_set_type_, ranges_, false, zone);
+ }
+ return ranges_;
+}
+
+// Move a number of elements in a zonelist to another position
+// in the same list. Handles overlapping source and target areas.
+static void MoveRanges(ZoneList<CharacterRange>* list, int from, int to,
+ int count) {
+ // Ranges are potentially overlapping.
+ if (from < to) {
+ for (int i = count - 1; i >= 0; i--) {
+ list->at(to + i) = list->at(from + i);
+ }
+ } else {
+ for (int i = 0; i < count; i++) {
+ list->at(to + i) = list->at(from + i);
+ }
+ }
+}
+
+static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list, int count,
+ CharacterRange insert) {
+ // Inserts a range into list[0..count[, which must be sorted
+ // by from value and non-overlapping and non-adjacent, using at most
+ // list[0..count] for the result. Returns the number of resulting
+ // canonicalized ranges. Inserting a range may collapse existing ranges into
+ // fewer ranges, so the return value can be anything in the range 1..count+1.
+ uc32 from = insert.from();
+ uc32 to = insert.to();
+ int start_pos = 0;
+ int end_pos = count;
+ for (int i = count - 1; i >= 0; i--) {
+ CharacterRange current = list->at(i);
+ if (current.from() > to + 1) {
+ end_pos = i;
+ } else if (current.to() + 1 < from) {
+ start_pos = i + 1;
+ break;
+ }
+ }
+
+ // Inserted range overlaps, or is adjacent to, ranges at positions
+ // [start_pos..end_pos[. Ranges before start_pos or at or after end_pos are
+ // not affected by the insertion.
+ // If start_pos == end_pos, the range must be inserted before start_pos.
+ // if start_pos < end_pos, the entire range from start_pos to end_pos
+ // must be merged with the insert range.
+
+ if (start_pos == end_pos) {
+ // Insert between existing ranges at position start_pos.
+ if (start_pos < count) {
+ MoveRanges(list, start_pos, start_pos + 1, count - start_pos);
+ }
+ list->at(start_pos) = insert;
+ return count + 1;
+ }
+ if (start_pos + 1 == end_pos) {
+ // Replace single existing range at position start_pos.
+ CharacterRange to_replace = list->at(start_pos);
+ int new_from = Min(to_replace.from(), from);
+ int new_to = Max(to_replace.to(), to);
+ list->at(start_pos) = CharacterRange::Range(new_from, new_to);
+ return count;
+ }
+ // Replace a number of existing ranges from start_pos to end_pos - 1.
+ // Move the remaining ranges down.
+
+ int new_from = Min(list->at(start_pos).from(), from);
+ int new_to = Max(list->at(end_pos - 1).to(), to);
+ if (end_pos < count) {
+ MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
+ }
+ list->at(start_pos) = CharacterRange::Range(new_from, new_to);
+ return count - (end_pos - start_pos) + 1;
+}
+
+void CharacterSet::Canonicalize() {
+ // Special/default classes are always considered canonical. The result
+ // of calling ranges() will be sorted.
+ if (ranges_ == nullptr) return;
+ CharacterRange::Canonicalize(ranges_);
+}
+
+void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
+ if (character_ranges->length() <= 1) return;
+ // Check whether ranges are already canonical (increasing, non-overlapping,
+ // non-adjacent).
+ int n = character_ranges->length();
+ int max = character_ranges->at(0).to();
+ int i = 1;
+ while (i < n) {
+ CharacterRange current = character_ranges->at(i);
+ if (current.from() <= max + 1) {
+ break;
+ }
+ max = current.to();
+ i++;
+ }
+ // Canonical until the i'th range. If that's all of them, we are done.
+ if (i == n) return;
+
+ // The ranges at index i and forward are not canonicalized. Make them so by
+ // doing the equivalent of insertion sort (inserting each into the previous
+ // list, in order).
+ // Notice that inserting a range can reduce the number of ranges in the
+ // result due to combining of adjacent and overlapping ranges.
+ int read = i; // Range to insert.
+ int num_canonical = i; // Length of canonicalized part of list.
+ do {
+ num_canonical = InsertRangeInCanonicalList(character_ranges, num_canonical,
+ character_ranges->at(read));
+ read++;
+ } while (read < n);
+ character_ranges->Rewind(num_canonical);
+
+ DCHECK(CharacterRange::IsCanonical(character_ranges));
+}
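+
+// Exposition-only sketch (not part of this change). The canonical form
+// produced above is "sorted by from(), non-overlapping and non-adjacent";
+// for input that is already sorted by start this is the classic merge shown
+// here. Canonicalize() reaches the same result in place via
+// InsertRangeInCanonicalList without requiring a pre-sort.
+#if 0  // Exposition only.
+#include <algorithm>
+#include <cstdint>
+#include <utility>
+#include <vector>
+static std::vector<std::pair<uint32_t, uint32_t>> MergeSortedRanges(
+    const std::vector<std::pair<uint32_t, uint32_t>>& sorted_by_from) {
+  std::vector<std::pair<uint32_t, uint32_t>> out;
+  for (const auto& range : sorted_by_from) {
+    // Merge overlapping as well as adjacent (to + 1 == from) ranges.
+    if (!out.empty() && range.first <= out.back().second + 1) {
+      out.back().second = std::max(out.back().second, range.second);
+    } else {
+      out.push_back(range);
+    }
+  }
+  return out;  // e.g. [a-d], [c-f], [g-h]  ->  [a-h]
+}
+#endif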
+
+void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
+ ZoneList<CharacterRange>* negated_ranges,
+ Zone* zone) {
+ DCHECK(CharacterRange::IsCanonical(ranges));
+ DCHECK_EQ(0, negated_ranges->length());
+ int range_count = ranges->length();
+ uc32 from = 0;
+ int i = 0;
+ if (range_count > 0 && ranges->at(0).from() == 0) {
+ from = ranges->at(0).to() + 1;
+ i = 1;
+ }
+ while (i < range_count) {
+ CharacterRange range = ranges->at(i);
+ negated_ranges->Add(CharacterRange::Range(from, range.from() - 1), zone);
+ from = range.to() + 1;
+ i++;
+ }
+ if (from < String::kMaxCodePoint) {
+ negated_ranges->Add(CharacterRange::Range(from, String::kMaxCodePoint),
+ zone);
+ }
+}
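+
+// Exposition-only sketch (not part of this change). It mirrors the
+// gap-walking approach of Negate() above: given a canonical range list, the
+// complement over [0, max] consists of the gaps between consecutive ranges.
+// For example, [A-Z] negated over one-byte input yields [0x00-0x40] and
+// [0x5B-0xFF].
+#if 0  // Exposition only.
+#include <cstdint>
+#include <utility>
+#include <vector>
+static std::vector<std::pair<uint32_t, uint32_t>> NegateCanonical(
+    const std::vector<std::pair<uint32_t, uint32_t>>& ranges, uint32_t max) {
+  std::vector<std::pair<uint32_t, uint32_t>> out;
+  uint32_t from = 0;
+  for (const auto& range : ranges) {
+    if (range.first > from) out.emplace_back(from, range.first - 1);
+    from = range.second + 1;
+  }
+  if (from <= max) out.emplace_back(from, max);
+  return out;
+}
+#endif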
+
+// Scoped object to keep track of how much we unroll quantifier loops in the
+// regexp graph generator.
+class RegExpExpansionLimiter {
+ public:
+ static const int kMaxExpansionFactor = 6;
+ RegExpExpansionLimiter(RegExpCompiler* compiler, int factor)
+ : compiler_(compiler),
+ saved_expansion_factor_(compiler->current_expansion_factor()),
+ ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) {
+ DCHECK_LT(0, factor);
+ if (ok_to_expand_) {
+ if (factor > kMaxExpansionFactor) {
+ // Avoid integer overflow of the current expansion factor.
+ ok_to_expand_ = false;
+ compiler->set_current_expansion_factor(kMaxExpansionFactor + 1);
+ } else {
+ int new_factor = saved_expansion_factor_ * factor;
+ ok_to_expand_ = (new_factor <= kMaxExpansionFactor);
+ compiler->set_current_expansion_factor(new_factor);
+ }
+ }
+ }
+
+ ~RegExpExpansionLimiter() {
+ compiler_->set_current_expansion_factor(saved_expansion_factor_);
+ }
+
+ bool ok_to_expand() { return ok_to_expand_; }
+
+ private:
+ RegExpCompiler* compiler_;
+ int saved_expansion_factor_;
+ bool ok_to_expand_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpExpansionLimiter);
+};
+
+RegExpNode* RegExpQuantifier::ToNode(int min, int max, bool is_greedy,
+ RegExpTree* body, RegExpCompiler* compiler,
+ RegExpNode* on_success,
+ bool not_at_start) {
+ // x{f, t} becomes this:
+ //
+ // (r++)<-.
+ // | `
+ // | (x)
+ // v ^
+ // (r=0)-->(?)---/ [if r < t]
+ // |
+ // [if r >= f] \----> ...
+ //
+
+ // 15.10.2.5 RepeatMatcher algorithm.
+ // The parser has already eliminated the case where max is 0. In the case
+ // where max_match is zero the parser has removed the quantifier if min was
+ // > 0 and removed the atom if min was 0. See AddQuantifierToAtom.
+
+ // If we know that we cannot match zero length then things are a little
+ // simpler since we don't need to make the special zero length match check
+ // from step 2.1. If the min and max are small we can unroll a little in
+ // this case.
+ static const int kMaxUnrolledMinMatches = 3; // Unroll (foo)+ and (foo){3,}
+ static const int kMaxUnrolledMaxMatches = 3; // Unroll (foo)? and (foo){x,3}
+ if (max == 0) return on_success; // This can happen due to recursion.
+ bool body_can_be_empty = (body->min_match() == 0);
+ int body_start_reg = RegExpCompiler::kNoRegister;
+ Interval capture_registers = body->CaptureRegisters();
+ bool needs_capture_clearing = !capture_registers.is_empty();
+ Zone* zone = compiler->zone();
+
+ if (body_can_be_empty) {
+ body_start_reg = compiler->AllocateRegister();
+ } else if (compiler->optimize() && !needs_capture_clearing) {
+ // Only unroll if there are no captures and the body can't be
+ // empty.
+ {
+ RegExpExpansionLimiter limiter(compiler, min + ((max != min) ? 1 : 0));
+ if (min > 0 && min <= kMaxUnrolledMinMatches && limiter.ok_to_expand()) {
+ int new_max = (max == kInfinity) ? max : max - min;
+ // Recurse once to get the loop or optional matches after the fixed
+ // ones.
+ RegExpNode* answer =
+ ToNode(0, new_max, is_greedy, body, compiler, on_success, true);
+ // Unroll the forced matches from 0 to min. This can cause chains of
+ // TextNodes (which the parser does not generate). These should be
+ // combined if it turns out they hinder good code generation.
+ for (int i = 0; i < min; i++) {
+ answer = body->ToNode(compiler, answer);
+ }
+ return answer;
+ }
+ }
+ if (max <= kMaxUnrolledMaxMatches && min == 0) {
+ DCHECK_LT(0, max); // Due to the 'if' above.
+ RegExpExpansionLimiter limiter(compiler, max);
+ if (limiter.ok_to_expand()) {
+ // Unroll the optional matches up to max.
+ RegExpNode* answer = on_success;
+ for (int i = 0; i < max; i++) {
+ ChoiceNode* alternation = new (zone) ChoiceNode(2, zone);
+ if (is_greedy) {
+ alternation->AddAlternative(
+ GuardedAlternative(body->ToNode(compiler, answer)));
+ alternation->AddAlternative(GuardedAlternative(on_success));
+ } else {
+ alternation->AddAlternative(GuardedAlternative(on_success));
+ alternation->AddAlternative(
+ GuardedAlternative(body->ToNode(compiler, answer)));
+ }
+ answer = alternation;
+ if (not_at_start && !compiler->read_backward()) {
+ alternation->set_not_at_start();
+ }
+ }
+ return answer;
+ }
+ }
+ }
+ bool has_min = min > 0;
+ bool has_max = max < RegExpTree::kInfinity;
+ bool needs_counter = has_min || has_max;
+ int reg_ctr = needs_counter ? compiler->AllocateRegister()
+ : RegExpCompiler::kNoRegister;
+ LoopChoiceNode* center = new (zone)
+ LoopChoiceNode(body->min_match() == 0, compiler->read_backward(), zone);
+ if (not_at_start && !compiler->read_backward()) center->set_not_at_start();
+ RegExpNode* loop_return =
+ needs_counter ? static_cast<RegExpNode*>(
+ ActionNode::IncrementRegister(reg_ctr, center))
+ : static_cast<RegExpNode*>(center);
+ if (body_can_be_empty) {
+ // If the body can be empty we need to check if it was and then
+ // backtrack.
+ loop_return =
+ ActionNode::EmptyMatchCheck(body_start_reg, reg_ctr, min, loop_return);
+ }
+ RegExpNode* body_node = body->ToNode(compiler, loop_return);
+ if (body_can_be_empty) {
+ // If the body can be empty we need to store the start position
+ // so we can bail out if it was empty.
+ body_node = ActionNode::StorePosition(body_start_reg, false, body_node);
+ }
+ if (needs_capture_clearing) {
+ // Before entering the body of this loop we need to clear captures.
+ body_node = ActionNode::ClearCaptures(capture_registers, body_node);
+ }
+ GuardedAlternative body_alt(body_node);
+ if (has_max) {
+ Guard* body_guard = new (zone) Guard(reg_ctr, Guard::LT, max);
+ body_alt.AddGuard(body_guard, zone);
+ }
+ GuardedAlternative rest_alt(on_success);
+ if (has_min) {
+ Guard* rest_guard = new (compiler->zone()) Guard(reg_ctr, Guard::GEQ, min);
+ rest_alt.AddGuard(rest_guard, zone);
+ }
+ if (is_greedy) {
+ center->AddLoopAlternative(body_alt);
+ center->AddContinueAlternative(rest_alt);
+ } else {
+ center->AddContinueAlternative(rest_alt);
+ center->AddLoopAlternative(body_alt);
+ }
+ if (needs_counter) {
+ return ActionNode::SetRegister(reg_ctr, 0, center);
+ } else {
+ return center;
+ }
+}
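+
+// Exposition-only sketch (not part of this change). For small, finite
+// min/max the greedy unrolling above has the shape produced by this helper:
+// the forced matches come first, followed by a nested chain of optional
+// matches. For example, Unroll("x", 2, 4) returns "xx(x(x)?)?", matching the
+// treatment of x{2,4}.
+#if 0  // Exposition only.
+#include <string>
+static std::string Unroll(const std::string& body, int min, int max) {
+  if (max == 0) return "";
+  if (min > 0) return body + Unroll(body, min - 1, max - 1);
+  return "(" + body + Unroll(body, 0, max - 1) + ")?";
+}
+#endif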
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
new file mode 100644
index 0000000000..c643f988c0
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -0,0 +1,3551 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-compiler.h"
+
+#include "src/diagnostics/code-tracer.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-macro-assembler-arch.h"
+#include "src/regexp/regexp-macro-assembler-tracer.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/zone/zone-list-inl.h"
+
+#ifdef V8_INTL_SUPPORT
+#include "unicode/locid.h"
+#include "unicode/uniset.h"
+#include "unicode/utypes.h"
+#endif // V8_INTL_SUPPORT
+
+namespace v8 {
+namespace internal {
+
+using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
+
+// -------------------------------------------------------------------
+// Implementation of the Irregexp regular expression engine.
+//
+// The Irregexp regular expression engine is intended to be a complete
+// implementation of ECMAScript regular expressions. It generates either
+// bytecodes or native code.
+
+// The Irregexp regexp engine is structured in three steps.
+// 1) The parser generates an abstract syntax tree. See ast.cc.
+// 2) From the AST a node network is created. The nodes are all
+// subclasses of RegExpNode. The nodes represent states when
+// executing a regular expression. Several optimizations are
+// performed on the node network.
+// 3) From the nodes we generate either byte codes or native code
+// that can actually execute the regular expression (perform
+// the search). The code generation step is described in more
+// detail below.
+
+// Code generation.
+//
+// The nodes are divided into four main categories.
+// * Choice nodes
+// These represent places where the regular expression can
+// match in more than one way. For example on entry to an
+// alternation (foo|bar) or a repetition (*, +, ? or {}).
+// * Action nodes
+// These represent places where some action should be
+// performed. Examples include recording the current position
+// in the input string to a register (in order to implement
+// captures) or other actions on register for example in order
+// to implement the counters needed for {} repetitions.
+// * Matching nodes
+// These attempt to match some element of the input string.
+// Examples of elements include character classes, plain strings
+// or back references.
+// * End nodes
+// These are used to implement the actions required on finding
+// a successful match or failing to find a match.
+//
+// The code generated (whether as byte codes or native code) maintains
+// some state as it runs. This consists of the following elements:
+//
+// * The capture registers. Used for string captures.
+// * Other registers. Used for counters etc.
+// * The current position.
+// * The stack of backtracking information. Used when a matching node
+// fails to find a match and needs to try an alternative.
+//
+// Conceptual regular expression execution model:
+//
+// There is a simple conceptual model of regular expression execution
+// which will be presented first. The actual code generated is a more
+// efficient simulation of the simple conceptual model:
+//
+// * Choice nodes are implemented as follows:
+// For each choice except the last {
+// push current position
+// push backtrack code location
+// <generate code to test for choice>
+// backtrack code location:
+// pop current position
+// }
+// <generate code to test for last choice>
+//
+// * Action nodes are generated as follows:
+// <push affected registers on backtrack stack>
+// <generate code to perform action>
+// push backtrack code location
+// <generate code to test for following nodes>
+// backtrack code location:
+// <pop affected registers to restore their state>
+// <pop backtrack location from stack and go to it>
+//
+// * Matching nodes are generated as follows:
+// if input string matches at current position
+// update current position
+// <generate code to test for following nodes>
+// else
+// <pop backtrack location from stack and go to it>
+//
+// Thus it can be seen that the current position is saved and restored
+// by the choice nodes, whereas the registers are saved and restored by
+// the action nodes that manipulate them.
+//
+// The other interesting aspect of this model is that nodes are generated
+// at the point where they are needed by a recursive call to Emit(). If
+// the node has already been code generated then the Emit() call will
+// generate a jump to the previously generated code instead. In order to
+// limit recursion it is possible for the Emit() function to put the node
+// on a work list for later generation and instead generate a jump. The
+// destination of the jump is resolved later when the code is generated.
+//
+// Actual regular expression code generation.
+//
+// Code generation is actually more complicated than the above. In order
+// to improve the efficiency of the generated code some optimizations are
+// performed
+//
+// * Choice nodes have 1-character lookahead.
+// A choice node looks at the following character and eliminates some of
+// the choices immediately based on that character. This is not yet
+// implemented.
+// * Simple greedy loops store reduced backtracking information.
+// A quantifier like /.*foo/m will greedily match the whole input. It will
+// then need to backtrack to a point where it can match "foo". The naive
+// implementation of this would push each character position onto the
+// backtracking stack, then pop them off one by one. This would use space
+// proportional to the length of the input string. However since the "."
+// can only match in one way and always has a constant length (in this case
+// of 1) it suffices to store the current position on the top of the stack
+// once. Matching now becomes merely incrementing the current position and
+// backtracking becomes decrementing the current position and checking the
+// result against the stored current position. This is faster and saves
+// space.
+// * The current state is virtualized.
+// This is used to defer expensive operations until it is clear that they
+// are needed and to generate code for a node more than once, allowing
+// specialized and efficient versions of the code to be created. This is
+// explained in the section below.
+//
+// Execution state virtualization.
+//
+// Instead of emitting code, nodes that manipulate the state can record their
+// manipulation in an object called the Trace. The Trace object can record a
+// current position offset, an optional backtrack code location on the top of
+// the virtualized backtrack stack and some register changes. When a node is
+// to be emitted it can flush the Trace or update it. Flushing the Trace
+// will emit code to bring the actual state into line with the virtual state.
+// Avoiding flushing the state can postpone some work (e.g. updates of capture
+// registers). Postponing work can save time when executing the regular
+// expression, since a failure to match may mean that the work never has to
+// be done at all. In addition it is much faster to jump to a
+// known backtrack code location than it is to pop an unknown backtrack
+// location from the stack and jump there.
+//
+// The virtual state found in the Trace affects code generation. For example
+// the virtual state contains the difference between the actual current
+// position and the virtual current position, and matching code needs to use
+// this offset to attempt a match in the correct location of the input
+// string. Therefore code generated for a non-trivial trace is specialized
+// to that trace. The code generator therefore has the ability to generate
+// code for each node several times. In order to limit the size of the
+// generated code there is an arbitrary limit on how many specialized sets of
+// code may be generated for a given node. If the limit is reached, the
+// trace is flushed and a generic version of the code for a node is emitted.
+// This is subsequently used for that node. The code emitted for a non-generic
+// trace is not recorded in the node and so it cannot currently be reused in
+// the event that code generation is requested for an identical trace.
+
+void RegExpTree::AppendToText(RegExpText* text, Zone* zone) { UNREACHABLE(); }
+
+void RegExpAtom::AppendToText(RegExpText* text, Zone* zone) {
+ text->AddElement(TextElement::Atom(this), zone);
+}
+
+void RegExpCharacterClass::AppendToText(RegExpText* text, Zone* zone) {
+ text->AddElement(TextElement::CharClass(this), zone);
+}
+
+void RegExpText::AppendToText(RegExpText* text, Zone* zone) {
+ for (int i = 0; i < elements()->length(); i++)
+ text->AddElement(elements()->at(i), zone);
+}
+
+TextElement TextElement::Atom(RegExpAtom* atom) {
+ return TextElement(ATOM, atom);
+}
+
+TextElement TextElement::CharClass(RegExpCharacterClass* char_class) {
+ return TextElement(CHAR_CLASS, char_class);
+}
+
+int TextElement::length() const {
+ switch (text_type()) {
+ case ATOM:
+ return atom()->length();
+
+ case CHAR_CLASS:
+ return 1;
+ }
+ UNREACHABLE();
+}
+
+class RecursionCheck {
+ public:
+ explicit RecursionCheck(RegExpCompiler* compiler) : compiler_(compiler) {
+ compiler->IncrementRecursionDepth();
+ }
+ ~RecursionCheck() { compiler_->DecrementRecursionDepth(); }
+
+ private:
+ RegExpCompiler* compiler_;
+};
+
+// Attempts to compile the regexp using an Irregexp code generator. Returns
+// a fixed array or a null handle depending on whether it succeeded.
+RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
+ bool one_byte)
+ : next_register_(2 * (capture_count + 1)),
+ unicode_lookaround_stack_register_(kNoRegister),
+ unicode_lookaround_position_register_(kNoRegister),
+ work_list_(nullptr),
+ recursion_depth_(0),
+ one_byte_(one_byte),
+ reg_exp_too_big_(false),
+ limiting_recursion_(false),
+ optimize_(FLAG_regexp_optimization),
+ read_backward_(false),
+ current_expansion_factor_(1),
+ frequency_collator_(),
+ isolate_(isolate),
+ zone_(zone) {
+ accept_ = new (zone) EndNode(EndNode::ACCEPT, zone);
+ DCHECK_GE(RegExpMacroAssembler::kMaxRegister, next_register_ - 1);
+}
+
+RegExpCompiler::CompilationResult RegExpCompiler::Assemble(
+ Isolate* isolate, RegExpMacroAssembler* macro_assembler, RegExpNode* start,
+ int capture_count, Handle<String> pattern) {
+#ifdef DEBUG
+ if (FLAG_trace_regexp_assembler)
+ macro_assembler_ = new RegExpMacroAssemblerTracer(isolate, macro_assembler);
+ else
+#endif
+ macro_assembler_ = macro_assembler;
+
+ std::vector<RegExpNode*> work_list;
+ work_list_ = &work_list;
+ Label fail;
+ macro_assembler_->PushBacktrack(&fail);
+ Trace new_trace;
+ start->Emit(this, &new_trace);
+ macro_assembler_->Bind(&fail);
+ macro_assembler_->Fail();
+ while (!work_list.empty()) {
+ RegExpNode* node = work_list.back();
+ work_list.pop_back();
+ node->set_on_work_list(false);
+ if (!node->label()->is_bound()) node->Emit(this, &new_trace);
+ }
+ if (reg_exp_too_big_) {
+ macro_assembler_->AbortedCodeGeneration();
+ return CompilationResult::RegExpTooBig();
+ }
+
+ Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
+ isolate->IncreaseTotalRegexpCodeGenerated(code->Size());
+ work_list_ = nullptr;
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code && !FLAG_regexp_interpret_all) {
+ CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+ OFStream os(trace_scope.file());
+ Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
+ }
+#endif
+#ifdef DEBUG
+ if (FLAG_trace_regexp_assembler) {
+ delete macro_assembler_;
+ }
+#endif
+ return {*code, next_register_};
+}
+
+bool Trace::DeferredAction::Mentions(int that) {
+ if (action_type() == ActionNode::CLEAR_CAPTURES) {
+ Interval range = static_cast<DeferredClearCaptures*>(this)->range();
+ return range.Contains(that);
+ } else {
+ return reg() == that;
+ }
+}
+
+bool Trace::mentions_reg(int reg) {
+ for (DeferredAction* action = actions_; action != nullptr;
+ action = action->next()) {
+ if (action->Mentions(reg)) return true;
+ }
+ return false;
+}
+
+bool Trace::GetStoredPosition(int reg, int* cp_offset) {
+ DCHECK_EQ(0, *cp_offset);
+ for (DeferredAction* action = actions_; action != nullptr;
+ action = action->next()) {
+ if (action->Mentions(reg)) {
+ if (action->action_type() == ActionNode::STORE_POSITION) {
+ *cp_offset = static_cast<DeferredCapture*>(action)->cp_offset();
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+ return false;
+}
+
+// A (dynamically-sized) set of unsigned integers that behaves especially well
+// on small integers (< kFirstLimit). May do zone-allocation.
+class DynamicBitSet : public ZoneObject {
+ public:
+ V8_EXPORT_PRIVATE bool Get(unsigned value) const {
+ if (value < kFirstLimit) {
+ return (first_ & (1 << value)) != 0;
+ } else if (remaining_ == nullptr) {
+ return false;
+ } else {
+ return remaining_->Contains(value);
+ }
+ }
+
+ // Destructively set a value in this set.
+ void Set(unsigned value, Zone* zone) {
+ if (value < kFirstLimit) {
+ first_ |= (1 << value);
+ } else {
+ if (remaining_ == nullptr)
+ remaining_ = new (zone) ZoneList<unsigned>(1, zone);
+ if (remaining_->is_empty() || !remaining_->Contains(value))
+ remaining_->Add(value, zone);
+ }
+ }
+
+ private:
+ static constexpr unsigned kFirstLimit = 32;
+
+ uint32_t first_ = 0;
+ ZoneList<unsigned>* remaining_ = nullptr;
+};
+
+int Trace::FindAffectedRegisters(DynamicBitSet* affected_registers,
+ Zone* zone) {
+ int max_register = RegExpCompiler::kNoRegister;
+ for (DeferredAction* action = actions_; action != nullptr;
+ action = action->next()) {
+ if (action->action_type() == ActionNode::CLEAR_CAPTURES) {
+ Interval range = static_cast<DeferredClearCaptures*>(action)->range();
+ for (int i = range.from(); i <= range.to(); i++)
+ affected_registers->Set(i, zone);
+ if (range.to() > max_register) max_register = range.to();
+ } else {
+ affected_registers->Set(action->reg(), zone);
+ if (action->reg() > max_register) max_register = action->reg();
+ }
+ }
+ return max_register;
+}
+
+void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler,
+ int max_register,
+ const DynamicBitSet& registers_to_pop,
+ const DynamicBitSet& registers_to_clear) {
+ for (int reg = max_register; reg >= 0; reg--) {
+ if (registers_to_pop.Get(reg)) {
+ assembler->PopRegister(reg);
+ } else if (registers_to_clear.Get(reg)) {
+ int clear_to = reg;
+ while (reg > 0 && registers_to_clear.Get(reg - 1)) {
+ reg--;
+ }
+ assembler->ClearRegisters(reg, clear_to);
+ }
+ }
+}
+
+void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
+ int max_register,
+ const DynamicBitSet& affected_registers,
+ DynamicBitSet* registers_to_pop,
+ DynamicBitSet* registers_to_clear,
+ Zone* zone) {
+ // The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
+ const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
+
+ // Count pushes performed to force a stack limit check occasionally.
+ int pushes = 0;
+
+ for (int reg = 0; reg <= max_register; reg++) {
+ if (!affected_registers.Get(reg)) {
+ continue;
+ }
+
+ // The chronologically first deferred action in the trace
+ // is used to infer the action needed to restore a register
+ // to its previous state (or not, if it's safe to ignore it).
+ enum DeferredActionUndoType { IGNORE, RESTORE, CLEAR };
+ DeferredActionUndoType undo_action = IGNORE;
+
+ int value = 0;
+ bool absolute = false;
+ bool clear = false;
+ static const int kNoStore = kMinInt;
+ int store_position = kNoStore;
+ // This is a little tricky because we are scanning the actions in reverse
+ // historical order (newest first).
+ for (DeferredAction* action = actions_; action != nullptr;
+ action = action->next()) {
+ if (action->Mentions(reg)) {
+ switch (action->action_type()) {
+ case ActionNode::SET_REGISTER: {
+ Trace::DeferredSetRegister* psr =
+ static_cast<Trace::DeferredSetRegister*>(action);
+ if (!absolute) {
+ value += psr->value();
+ absolute = true;
+ }
+ // SET_REGISTER is currently only used for newly introduced loop
+ // counters. They can have a significant previous value if they
+ // occur in a loop. TODO(lrn): Propagate this information, so
+ // we can set undo_action to IGNORE if we know there is no value to
+ // restore.
+ undo_action = RESTORE;
+ DCHECK_EQ(store_position, kNoStore);
+ DCHECK(!clear);
+ break;
+ }
+ case ActionNode::INCREMENT_REGISTER:
+ if (!absolute) {
+ value++;
+ }
+ DCHECK_EQ(store_position, kNoStore);
+ DCHECK(!clear);
+ undo_action = RESTORE;
+ break;
+ case ActionNode::STORE_POSITION: {
+ Trace::DeferredCapture* pc =
+ static_cast<Trace::DeferredCapture*>(action);
+ if (!clear && store_position == kNoStore) {
+ store_position = pc->cp_offset();
+ }
+
+ // For captures we know that stores and clears alternate.
+            // Other registers are never cleared, and if they occur
+ // inside a loop, they might be assigned more than once.
+ if (reg <= 1) {
+              // Registers zero and one, aka "capture zero", are
+              // always set correctly if we succeed. There is no
+ // need to undo a setting on backtrack, because we
+ // will set it again or fail.
+ undo_action = IGNORE;
+ } else {
+ undo_action = pc->is_capture() ? CLEAR : RESTORE;
+ }
+ DCHECK(!absolute);
+ DCHECK_EQ(value, 0);
+ break;
+ }
+ case ActionNode::CLEAR_CAPTURES: {
+ // Since we're scanning in reverse order, if we've already
+ // set the position we have to ignore historically earlier
+ // clearing operations.
+ if (store_position == kNoStore) {
+ clear = true;
+ }
+ undo_action = RESTORE;
+ DCHECK(!absolute);
+ DCHECK_EQ(value, 0);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ // Prepare for the undo-action (e.g., push if it's going to be popped).
+ if (undo_action == RESTORE) {
+ pushes++;
+ RegExpMacroAssembler::StackCheckFlag stack_check =
+ RegExpMacroAssembler::kNoStackLimitCheck;
+ if (pushes == push_limit) {
+ stack_check = RegExpMacroAssembler::kCheckStackLimit;
+ pushes = 0;
+ }
+
+ assembler->PushRegister(reg, stack_check);
+ registers_to_pop->Set(reg, zone);
+ } else if (undo_action == CLEAR) {
+ registers_to_clear->Set(reg, zone);
+ }
+ // Perform the chronologically last action (or accumulated increment)
+ // for the register.
+ if (store_position != kNoStore) {
+ assembler->WriteCurrentPositionToRegister(reg, store_position);
+ } else if (clear) {
+ assembler->ClearRegisters(reg, reg);
+ } else if (absolute) {
+ assembler->SetRegister(reg, value);
+ } else if (value != 0) {
+ assembler->AdvanceRegister(reg, value);
+ }
+ }
+}
+
+// This is called as we come into a loop choice node and some other tricky
+// nodes. It normalizes the state of the code generator to ensure we can
+// generate generic code.
+void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+
+ DCHECK(!is_trivial());
+
+ if (actions_ == nullptr && backtrack() == nullptr) {
+ // Here we just have some deferred cp advances to fix and we are back to
+ // a normal situation. We may also have to forget some information gained
+ // through a quick check that was already performed.
+ if (cp_offset_ != 0) assembler->AdvanceCurrentPosition(cp_offset_);
+ // Create a new trivial state and generate the node with that.
+ Trace new_state;
+ successor->Emit(compiler, &new_state);
+ return;
+ }
+
+ // Generate deferred actions here along with code to undo them again.
+ DynamicBitSet affected_registers;
+
+ if (backtrack() != nullptr) {
+ // Here we have a concrete backtrack location. These are set up by choice
+ // nodes and so they indicate that we have a deferred save of the current
+ // position which we may need to emit here.
+ assembler->PushCurrentPosition();
+ }
+
+ int max_register =
+ FindAffectedRegisters(&affected_registers, compiler->zone());
+ DynamicBitSet registers_to_pop;
+ DynamicBitSet registers_to_clear;
+ PerformDeferredActions(assembler, max_register, affected_registers,
+ &registers_to_pop, &registers_to_clear,
+ compiler->zone());
+ if (cp_offset_ != 0) {
+ assembler->AdvanceCurrentPosition(cp_offset_);
+ }
+
+ // Create a new trivial state and generate the node with that.
+ Label undo;
+ assembler->PushBacktrack(&undo);
+ if (successor->KeepRecursing(compiler)) {
+ Trace new_state;
+ successor->Emit(compiler, &new_state);
+ } else {
+ compiler->AddWork(successor);
+ assembler->GoTo(successor->label());
+ }
+
+ // On backtrack we need to restore state.
+ assembler->Bind(&undo);
+ RestoreAffectedRegisters(assembler, max_register, registers_to_pop,
+ registers_to_clear);
+ if (backtrack() == nullptr) {
+ assembler->Backtrack();
+ } else {
+ assembler->PopCurrentPosition();
+ assembler->GoTo(backtrack());
+ }
+}
+
+void NegativeSubmatchSuccess::Emit(RegExpCompiler* compiler, Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+
+ // Omit flushing the trace. We discard the entire stack frame anyway.
+
+ if (!label()->is_bound()) {
+ // We are completely independent of the trace, since we ignore it,
+ // so this code can be used as the generic version.
+ assembler->Bind(label());
+ }
+
+ // Throw away everything on the backtrack stack since the start
+ // of the negative submatch and restore the character position.
+ assembler->ReadCurrentPositionFromRegister(current_position_register_);
+ assembler->ReadStackPointerFromRegister(stack_pointer_register_);
+ if (clear_capture_count_ > 0) {
+ // Clear any captures that might have been performed during the success
+ // of the body of the negative look-ahead.
+ int clear_capture_end = clear_capture_start_ + clear_capture_count_ - 1;
+ assembler->ClearRegisters(clear_capture_start_, clear_capture_end);
+ }
+ // Now that we have unwound the stack we find at the top of the stack the
+ // backtrack that the BeginSubmatch node got.
+ assembler->Backtrack();
+}
+
+void EndNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ return;
+ }
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ if (!label()->is_bound()) {
+ assembler->Bind(label());
+ }
+ switch (action_) {
+ case ACCEPT:
+ assembler->Succeed();
+ return;
+ case BACKTRACK:
+ assembler->GoTo(trace->backtrack());
+ return;
+ case NEGATIVE_SUBMATCH_SUCCESS:
+ // This case is handled in a different virtual method.
+ UNREACHABLE();
+ }
+ UNIMPLEMENTED();
+}
+
+void GuardedAlternative::AddGuard(Guard* guard, Zone* zone) {
+ if (guards_ == nullptr) guards_ = new (zone) ZoneList<Guard*>(1, zone);
+ guards_->Add(guard, zone);
+}
+
+ActionNode* ActionNode::SetRegister(int reg, int val, RegExpNode* on_success) {
+ ActionNode* result =
+ new (on_success->zone()) ActionNode(SET_REGISTER, on_success);
+ result->data_.u_store_register.reg = reg;
+ result->data_.u_store_register.value = val;
+ return result;
+}
+
+ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) {
+ ActionNode* result =
+ new (on_success->zone()) ActionNode(INCREMENT_REGISTER, on_success);
+ result->data_.u_increment_register.reg = reg;
+ return result;
+}
+
+ActionNode* ActionNode::StorePosition(int reg, bool is_capture,
+ RegExpNode* on_success) {
+ ActionNode* result =
+ new (on_success->zone()) ActionNode(STORE_POSITION, on_success);
+ result->data_.u_position_register.reg = reg;
+ result->data_.u_position_register.is_capture = is_capture;
+ return result;
+}
+
+ActionNode* ActionNode::ClearCaptures(Interval range, RegExpNode* on_success) {
+ ActionNode* result =
+ new (on_success->zone()) ActionNode(CLEAR_CAPTURES, on_success);
+ result->data_.u_clear_captures.range_from = range.from();
+ result->data_.u_clear_captures.range_to = range.to();
+ return result;
+}
+
+ActionNode* ActionNode::BeginSubmatch(int stack_reg, int position_reg,
+ RegExpNode* on_success) {
+ ActionNode* result =
+ new (on_success->zone()) ActionNode(BEGIN_SUBMATCH, on_success);
+ result->data_.u_submatch.stack_pointer_register = stack_reg;
+ result->data_.u_submatch.current_position_register = position_reg;
+ return result;
+}
+
+ActionNode* ActionNode::PositiveSubmatchSuccess(int stack_reg, int position_reg,
+ int clear_register_count,
+ int clear_register_from,
+ RegExpNode* on_success) {
+ ActionNode* result = new (on_success->zone())
+ ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
+ result->data_.u_submatch.stack_pointer_register = stack_reg;
+ result->data_.u_submatch.current_position_register = position_reg;
+ result->data_.u_submatch.clear_register_count = clear_register_count;
+ result->data_.u_submatch.clear_register_from = clear_register_from;
+ return result;
+}
+
+ActionNode* ActionNode::EmptyMatchCheck(int start_register,
+ int repetition_register,
+ int repetition_limit,
+ RegExpNode* on_success) {
+ ActionNode* result =
+ new (on_success->zone()) ActionNode(EMPTY_MATCH_CHECK, on_success);
+ result->data_.u_empty_match_check.start_register = start_register;
+ result->data_.u_empty_match_check.repetition_register = repetition_register;
+ result->data_.u_empty_match_check.repetition_limit = repetition_limit;
+ return result;
+}
+
+#define DEFINE_ACCEPT(Type) \
+ void Type##Node::Accept(NodeVisitor* visitor) { visitor->Visit##Type(this); }
+FOR_EACH_NODE_TYPE(DEFINE_ACCEPT)
+#undef DEFINE_ACCEPT
+
+void LoopChoiceNode::Accept(NodeVisitor* visitor) {
+ visitor->VisitLoopChoice(this);
+}
+
+// -------------------------------------------------------------------
+// Emit code.
+
+void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
+ Guard* guard, Trace* trace) {
+ switch (guard->op()) {
+ case Guard::LT:
+ DCHECK(!trace->mentions_reg(guard->reg()));
+ macro_assembler->IfRegisterGE(guard->reg(), guard->value(),
+ trace->backtrack());
+ break;
+ case Guard::GEQ:
+ DCHECK(!trace->mentions_reg(guard->reg()));
+ macro_assembler->IfRegisterLT(guard->reg(), guard->value(),
+ trace->backtrack());
+ break;
+ }
+}
+
+// Returns the number of characters in the equivalence class, omitting those
+// that cannot occur in the source string because it is Latin1.
+static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
+ bool one_byte_subject,
+ unibrow::uchar* letters,
+ int letter_length) {
+#ifdef V8_INTL_SUPPORT
+ icu::UnicodeSet set;
+ set.add(character);
+ set = set.closeOver(USET_CASE_INSENSITIVE);
+ int32_t range_count = set.getRangeCount();
+ int items = 0;
+ for (int32_t i = 0; i < range_count; i++) {
+ UChar32 start = set.getRangeStart(i);
+ UChar32 end = set.getRangeEnd(i);
+ CHECK(end - start + items <= letter_length);
+ while (start <= end) {
+ if (one_byte_subject && start > String::kMaxOneByteCharCode) break;
+ letters[items++] = (unibrow::uchar)(start);
+ start++;
+ }
+ }
+ return items;
+#else
+ int length =
+ isolate->jsregexp_uncanonicalize()->get(character, '\0', letters);
+ // Unibrow returns 0 or 1 for characters where case independence is
+ // trivial.
+ if (length == 0) {
+ letters[0] = character;
+ length = 1;
+ }
+
+ if (one_byte_subject) {
+ int new_length = 0;
+ for (int i = 0; i < length; i++) {
+ if (letters[i] <= String::kMaxOneByteCharCode) {
+ letters[new_length++] = letters[i];
+ }
+ }
+ length = new_length;
+ }
+
+ return length;
+#endif // V8_INTL_SUPPORT
+}
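+
+// Exposition-only sketch (not part of this change), assuming an ICU build.
+// The case-equivalence class of 'k' is {'k', 'K', U+212A KELVIN SIGN}, so
+// GetCaseIndependentLetters returns three entries for it (only two for a
+// one-byte subject, where U+212A is dropped). This is one reason the callers
+// below pass a four-element buffer.
+#if 0  // Exposition only.
+static int KelvinExample() {
+  icu::UnicodeSet set(static_cast<UChar32>('k'), static_cast<UChar32>('k'));
+  set.closeOver(USET_CASE_INSENSITIVE);
+  return set.size();  // 3: 'k', 'K' and U+212A.
+}
+#endif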
+
+static inline bool EmitSimpleCharacter(Isolate* isolate,
+ RegExpCompiler* compiler, uc16 c,
+ Label* on_failure, int cp_offset,
+ bool check, bool preloaded) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ bool bound_checked = false;
+ if (!preloaded) {
+ assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
+ bound_checked = true;
+ }
+ assembler->CheckNotCharacter(c, on_failure);
+ return bound_checked;
+}
+
+// Only emits non-letters (things that don't have case). Only used for case
+// independent matches.
+static inline bool EmitAtomNonLetter(Isolate* isolate, RegExpCompiler* compiler,
+ uc16 c, Label* on_failure, int cp_offset,
+ bool check, bool preloaded) {
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ bool one_byte = compiler->one_byte();
+ unibrow::uchar chars[4];
+ int length = GetCaseIndependentLetters(isolate, c, one_byte, chars, 4);
+ if (length < 1) {
+    // This can't match. Must be a one-byte subject and a non-one-byte
+ // character. We do not need to do anything since the one-byte pass
+ // already handled this.
+ return false; // Bounds not checked.
+ }
+ bool checked = false;
+ // We handle the length > 1 case in a later pass.
+ if (length == 1) {
+ if (one_byte && c > String::kMaxOneByteCharCodeU) {
+ // Can't match - see above.
+ return false; // Bounds not checked.
+ }
+ if (!preloaded) {
+ macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
+ checked = check;
+ }
+ macro_assembler->CheckNotCharacter(c, on_failure);
+ }
+ return checked;
+}
+
+static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
+ bool one_byte, uc16 c1, uc16 c2,
+ Label* on_failure) {
+ uc16 char_mask;
+ if (one_byte) {
+ char_mask = String::kMaxOneByteCharCode;
+ } else {
+ char_mask = String::kMaxUtf16CodeUnit;
+ }
+ uc16 exor = c1 ^ c2;
+ // Check whether exor has only one bit set.
+ if (((exor - 1) & exor) == 0) {
+    // Here c1 and c2 differ only by one bit.
+ // Ecma262UnCanonicalize always gives the highest number last.
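+    // E.g. for the case pair 'A' (0x41) and 'a' (0x61) only bit 0x20
+    // differs, so the single masked compare (current_char & ~0x20) == 'A'
+    // accepts exactly those two characters.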
+ DCHECK(c2 > c1);
+ uc16 mask = char_mask ^ exor;
+ macro_assembler->CheckNotCharacterAfterAnd(c1, mask, on_failure);
+ return true;
+ }
+ DCHECK(c2 > c1);
+ uc16 diff = c2 - c1;
+ if (((diff - 1) & diff) == 0 && c1 >= diff) {
+ // If the characters differ by 2^n but don't differ by one bit then
+ // subtract the difference from the found character, then do the or
+ // trick. We avoid the theoretical case where negative numbers are
+ // involved in order to simplify code generation.
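+    // E.g. for a hypothetical pair c1 == 0x3E, c2 == 0x40 (diff 2, several
+    // differing bits) the emitted check is ((current_char - 2) & ~2) == 0x3C,
+    // which again accepts exactly those two characters.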
+ uc16 mask = char_mask ^ diff;
+ macro_assembler->CheckNotCharacterAfterMinusAnd(c1 - diff, diff, mask,
+ on_failure);
+ return true;
+ }
+ return false;
+}
+
+using EmitCharacterFunction = bool(Isolate* isolate, RegExpCompiler* compiler,
+ uc16 c, Label* on_failure, int cp_offset,
+ bool check, bool preloaded);
+
+// Only emits letters (things that have case). Only used for case independent
+// matches.
+static inline bool EmitAtomLetter(Isolate* isolate, RegExpCompiler* compiler,
+ uc16 c, Label* on_failure, int cp_offset,
+ bool check, bool preloaded) {
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ bool one_byte = compiler->one_byte();
+ unibrow::uchar chars[4];
+ int length = GetCaseIndependentLetters(isolate, c, one_byte, chars, 4);
+ if (length <= 1) return false;
+ // We may not need to check against the end of the input string
+ // if this character lies before a character that matched.
+ if (!preloaded) {
+ macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
+ }
+ Label ok;
+ switch (length) {
+ case 2: {
+      if (!ShortCutEmitCharacterPair(macro_assembler, one_byte, chars[0],
+                                     chars[1], on_failure)) {
+        macro_assembler->CheckCharacter(chars[0], &ok);
+        macro_assembler->CheckNotCharacter(chars[1], on_failure);
+        macro_assembler->Bind(&ok);
+      }
+      break;
+ }
+ case 4:
+ macro_assembler->CheckCharacter(chars[3], &ok);
+ V8_FALLTHROUGH;
+ case 3:
+ macro_assembler->CheckCharacter(chars[0], &ok);
+ macro_assembler->CheckCharacter(chars[1], &ok);
+ macro_assembler->CheckNotCharacter(chars[2], on_failure);
+ macro_assembler->Bind(&ok);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return true;
+}
+
+static void EmitBoundaryTest(RegExpMacroAssembler* masm, int border,
+ Label* fall_through, Label* above_or_equal,
+ Label* below) {
+ if (below != fall_through) {
+ masm->CheckCharacterLT(border, below);
+ if (above_or_equal != fall_through) masm->GoTo(above_or_equal);
+ } else {
+ masm->CheckCharacterGT(border - 1, above_or_equal);
+ }
+}
+
+static void EmitDoubleBoundaryTest(RegExpMacroAssembler* masm, int first,
+ int last, Label* fall_through,
+ Label* in_range, Label* out_of_range) {
+ if (in_range == fall_through) {
+ if (first == last) {
+ masm->CheckNotCharacter(first, out_of_range);
+ } else {
+ masm->CheckCharacterNotInRange(first, last, out_of_range);
+ }
+ } else {
+ if (first == last) {
+ masm->CheckCharacter(first, in_range);
+ } else {
+ masm->CheckCharacterInRange(first, last, in_range);
+ }
+ if (out_of_range != fall_through) masm->GoTo(out_of_range);
+ }
+}
+
+// even_label is for ranges[i] to ranges[i + 1] where i - start_index is even.
+// odd_label is for ranges[i] to ranges[i + 1] where i - start_index is odd.
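+// All boundaries in [start_index, end_index] lie on a single kTableSize
+// (128-character) page, so membership within that page can be encoded as a
+// 128-entry boolean table and tested with one CheckBitInTable.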
+static void EmitUseLookupTable(RegExpMacroAssembler* masm,
+ ZoneList<int>* ranges, int start_index,
+ int end_index, int min_char, Label* fall_through,
+ Label* even_label, Label* odd_label) {
+ static const int kSize = RegExpMacroAssembler::kTableSize;
+ static const int kMask = RegExpMacroAssembler::kTableMask;
+
+ int base = (min_char & ~kMask);
+ USE(base);
+
+ // Assert that everything is on one kTableSize page.
+ for (int i = start_index; i <= end_index; i++) {
+ DCHECK_EQ(ranges->at(i) & ~kMask, base);
+ }
+ DCHECK(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base);
+
+ char templ[kSize];
+ Label* on_bit_set;
+ Label* on_bit_clear;
+ int bit;
+ if (even_label == fall_through) {
+ on_bit_set = odd_label;
+ on_bit_clear = even_label;
+ bit = 1;
+ } else {
+ on_bit_set = even_label;
+ on_bit_clear = odd_label;
+ bit = 0;
+ }
+ for (int i = 0; i < (ranges->at(start_index) & kMask) && i < kSize; i++) {
+ templ[i] = bit;
+ }
+ int j = 0;
+ bit ^= 1;
+ for (int i = start_index; i < end_index; i++) {
+ for (j = (ranges->at(i) & kMask); j < (ranges->at(i + 1) & kMask); j++) {
+ templ[j] = bit;
+ }
+ bit ^= 1;
+ }
+ for (int i = j; i < kSize; i++) {
+ templ[i] = bit;
+ }
+ Factory* factory = masm->isolate()->factory();
+ // TODO(erikcorry): Cache these.
+ Handle<ByteArray> ba = factory->NewByteArray(kSize, AllocationType::kOld);
+ for (int i = 0; i < kSize; i++) {
+ ba->set(i, templ[i]);
+ }
+ masm->CheckBitInTable(ba, on_bit_set);
+ if (on_bit_clear != fall_through) masm->GoTo(on_bit_clear);
+}
+
+static void CutOutRange(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
+ int start_index, int end_index, int cut_index,
+ Label* even_label, Label* odd_label) {
+ bool odd = (((cut_index - start_index) & 1) == 1);
+ Label* in_range_label = odd ? odd_label : even_label;
+ Label dummy;
+ EmitDoubleBoundaryTest(masm, ranges->at(cut_index),
+ ranges->at(cut_index + 1) - 1, &dummy, in_range_label,
+ &dummy);
+ DCHECK(!dummy.is_linked());
+ // Cut out the single range by rewriting the array. This creates a new
+ // range that is a merger of the two ranges on either side of the one we
+ // are cutting out. The oddity of the labels is preserved.
+ for (int j = cut_index; j > start_index; j--) {
+ ranges->at(j) = ranges->at(j - 1);
+ }
+ for (int j = cut_index + 1; j < end_index; j++) {
+ ranges->at(j) = ranges->at(j + 1);
+ }
+}
+
+// Unicode case. Split the search space into kSize spaces that are handled
+// with recursion.
+static void SplitSearchSpace(ZoneList<int>* ranges, int start_index,
+ int end_index, int* new_start_index,
+ int* new_end_index, int* border) {
+ static const int kSize = RegExpMacroAssembler::kTableSize;
+ static const int kMask = RegExpMacroAssembler::kTableMask;
+
+ int first = ranges->at(start_index);
+ int last = ranges->at(end_index) - 1;
+
+ *new_start_index = start_index;
+ *border = (ranges->at(start_index) & ~kMask) + kSize;
+ while (*new_start_index < end_index) {
+ if (ranges->at(*new_start_index) > *border) break;
+ (*new_start_index)++;
+ }
+ // new_start_index is the index of the first edge that is beyond the
+ // current kSize space.
+
+ // For very large search spaces we do a binary chop search of the non-Latin1
+ // space instead of just going to the end of the current kSize space. The
+ // heuristics are complicated a little by the fact that any 128-character
+ // encoding space can be quickly tested with a table lookup, so we don't
+ // wish to do binary chop search at a smaller granularity than that. A
+ // 128-character space can take up a lot of space in the ranges array if,
+  // for example, we only want to match every second character (e.g. the lower
+ // case characters on some Unicode pages).
+ int binary_chop_index = (end_index + start_index) / 2;
+ // The first test ensures that we get to the code that handles the Latin1
+ // range with a single not-taken branch, speeding up this important
+ // character range (even non-Latin1 charset-based text has spaces and
+ // punctuation).
+ if (*border - 1 > String::kMaxOneByteCharCode && // Latin1 case.
+ end_index - start_index > (*new_start_index - start_index) * 2 &&
+ last - first > kSize * 2 && binary_chop_index > *new_start_index &&
+ ranges->at(binary_chop_index) >= first + 2 * kSize) {
+ int scan_forward_for_section_border = binary_chop_index;
+ int new_border = (ranges->at(binary_chop_index) | kMask) + 1;
+
+ while (scan_forward_for_section_border < end_index) {
+ if (ranges->at(scan_forward_for_section_border) > new_border) {
+ *new_start_index = scan_forward_for_section_border;
+ *border = new_border;
+ break;
+ }
+ scan_forward_for_section_border++;
+ }
+ }
+
+ DCHECK(*new_start_index > start_index);
+ *new_end_index = *new_start_index - 1;
+ if (ranges->at(*new_end_index) == *border) {
+ (*new_end_index)--;
+ }
+ if (*border >= ranges->at(end_index)) {
+ *border = ranges->at(end_index);
+ *new_start_index = end_index; // Won't be used.
+ *new_end_index = end_index - 1;
+ }
+}
+
+// Gets a series of segment boundaries representing a character class. If the
+// character is in the range between an even and an odd boundary (counting from
+// start_index) then go to even_label, otherwise go to odd_label. We already
+// know that the character is in the range of min_char to max_char inclusive.
+// Either label can be nullptr indicating backtracking. Either label can also
+// be equal to the fall_through label.
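+// E.g. for the class [0-9A-Z] the boundary list is {48, 58, 65, 91}:
+// characters in [48, 58) or [65, 91) branch to even_label and everything
+// else branches to odd_label.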
+static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
+ int start_index, int end_index, uc32 min_char,
+ uc32 max_char, Label* fall_through,
+ Label* even_label, Label* odd_label) {
+ DCHECK_LE(min_char, String::kMaxUtf16CodeUnit);
+ DCHECK_LE(max_char, String::kMaxUtf16CodeUnit);
+
+ int first = ranges->at(start_index);
+ int last = ranges->at(end_index) - 1;
+
+ DCHECK_LT(min_char, first);
+
+ // Just need to test if the character is before or on-or-after
+ // a particular character.
+ if (start_index == end_index) {
+ EmitBoundaryTest(masm, first, fall_through, even_label, odd_label);
+ return;
+ }
+
+ // Another almost trivial case: There is one interval in the middle that is
+ // different from the end intervals.
+ if (start_index + 1 == end_index) {
+ EmitDoubleBoundaryTest(masm, first, last, fall_through, even_label,
+ odd_label);
+ return;
+ }
+
+ // It's not worth using table lookup if there are very few intervals in the
+ // character class.
+ if (end_index - start_index <= 6) {
+ // It is faster to test for individual characters, so we look for those
+ // first, then try arbitrary ranges in the second round.
+ static int kNoCutIndex = -1;
+ int cut = kNoCutIndex;
+ for (int i = start_index; i < end_index; i++) {
+ if (ranges->at(i) == ranges->at(i + 1) - 1) {
+ cut = i;
+ break;
+ }
+ }
+ if (cut == kNoCutIndex) cut = start_index;
+ CutOutRange(masm, ranges, start_index, end_index, cut, even_label,
+ odd_label);
+ DCHECK_GE(end_index - start_index, 2);
+ GenerateBranches(masm, ranges, start_index + 1, end_index - 1, min_char,
+ max_char, fall_through, even_label, odd_label);
+ return;
+ }
+
+ // If there are a lot of intervals in the regexp, then we will use tables to
+ // determine whether the character is inside or outside the character class.
+ static const int kBits = RegExpMacroAssembler::kTableSizeBits;
+
+ if ((max_char >> kBits) == (min_char >> kBits)) {
+ EmitUseLookupTable(masm, ranges, start_index, end_index, min_char,
+ fall_through, even_label, odd_label);
+ return;
+ }
+
+ if ((min_char >> kBits) != (first >> kBits)) {
+ masm->CheckCharacterLT(first, odd_label);
+ GenerateBranches(masm, ranges, start_index + 1, end_index, first, max_char,
+ fall_through, odd_label, even_label);
+ return;
+ }
+
+ int new_start_index = 0;
+ int new_end_index = 0;
+ int border = 0;
+
+ SplitSearchSpace(ranges, start_index, end_index, &new_start_index,
+ &new_end_index, &border);
+
+ Label handle_rest;
+ Label* above = &handle_rest;
+ if (border == last + 1) {
+ // We didn't find any section that started after the limit, so everything
+ // above the border is one of the terminal labels.
+ above = (end_index & 1) != (start_index & 1) ? odd_label : even_label;
+ DCHECK(new_end_index == end_index - 1);
+ }
+
+ DCHECK_LE(start_index, new_end_index);
+ DCHECK_LE(new_start_index, end_index);
+ DCHECK_LT(start_index, new_start_index);
+ DCHECK_LT(new_end_index, end_index);
+ DCHECK(new_end_index + 1 == new_start_index ||
+ (new_end_index + 2 == new_start_index &&
+ border == ranges->at(new_end_index + 1)));
+ DCHECK_LT(min_char, border - 1);
+ DCHECK_LT(border, max_char);
+ DCHECK_LT(ranges->at(new_end_index), border);
+ DCHECK(border < ranges->at(new_start_index) ||
+ (border == ranges->at(new_start_index) &&
+ new_start_index == end_index && new_end_index == end_index - 1 &&
+ border == last + 1));
+ DCHECK(new_start_index == 0 || border >= ranges->at(new_start_index - 1));
+
+ masm->CheckCharacterGT(border - 1, above);
+ Label dummy;
+ GenerateBranches(masm, ranges, start_index, new_end_index, min_char,
+ border - 1, &dummy, even_label, odd_label);
+ if (handle_rest.is_linked()) {
+ masm->Bind(&handle_rest);
+ bool flip = (new_start_index & 1) != (start_index & 1);
+ GenerateBranches(masm, ranges, new_start_index, end_index, border, max_char,
+ &dummy, flip ? odd_label : even_label,
+ flip ? even_label : odd_label);
+ }
+}
+
+static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
+ RegExpCharacterClass* cc, bool one_byte,
+ Label* on_failure, int cp_offset, bool check_offset,
+ bool preloaded, Zone* zone) {
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone);
+ CharacterRange::Canonicalize(ranges);
+
+ int max_char;
+ if (one_byte) {
+ max_char = String::kMaxOneByteCharCode;
+ } else {
+ max_char = String::kMaxUtf16CodeUnit;
+ }
+
+ int range_count = ranges->length();
+
+ int last_valid_range = range_count - 1;
+ while (last_valid_range >= 0) {
+ CharacterRange& range = ranges->at(last_valid_range);
+ if (range.from() <= max_char) {
+ break;
+ }
+ last_valid_range--;
+ }
+
+ if (last_valid_range < 0) {
+ if (!cc->is_negated()) {
+ macro_assembler->GoTo(on_failure);
+ }
+ if (check_offset) {
+ macro_assembler->CheckPosition(cp_offset, on_failure);
+ }
+ return;
+ }
+
+ if (last_valid_range == 0 && ranges->at(0).IsEverything(max_char)) {
+ if (cc->is_negated()) {
+ macro_assembler->GoTo(on_failure);
+ } else {
+ // This is a common case hit by non-anchored expressions.
+ if (check_offset) {
+ macro_assembler->CheckPosition(cp_offset, on_failure);
+ }
+ }
+ return;
+ }
+
+ if (!preloaded) {
+ macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
+ }
+
+ if (cc->is_standard(zone) && macro_assembler->CheckSpecialCharacterClass(
+ cc->standard_type(), on_failure)) {
+ return;
+ }
+
+ // A new list with ascending entries. Each entry is a code unit
+ // where there is a boundary between code units that are part of
+ // the class and code units that are not. Normally we insert an
+ // entry at zero which goes to the failure label, but if there
+ // was already one there we fall through for success on that entry.
+ // Subsequent entries have alternating meaning (success/failure).
+ ZoneList<int>* range_boundaries =
+ new (zone) ZoneList<int>(last_valid_range, zone);
+
+ bool zeroth_entry_is_failure = !cc->is_negated();
+
+ for (int i = 0; i <= last_valid_range; i++) {
+ CharacterRange& range = ranges->at(i);
+ if (range.from() == 0) {
+ DCHECK_EQ(i, 0);
+ zeroth_entry_is_failure = !zeroth_entry_is_failure;
+ } else {
+ range_boundaries->Add(range.from(), zone);
+ }
+ range_boundaries->Add(range.to() + 1, zone);
+ }
+ int end_index = range_boundaries->length() - 1;
+ if (range_boundaries->at(end_index) > max_char) {
+ end_index--;
+ }
+
+ Label fall_through;
+ GenerateBranches(macro_assembler, range_boundaries,
+ 0, // start_index.
+ end_index,
+ 0, // min_char.
+ max_char, &fall_through,
+ zeroth_entry_is_failure ? &fall_through : on_failure,
+ zeroth_entry_is_failure ? on_failure : &fall_through);
+ macro_assembler->Bind(&fall_through);
+}
+
+RegExpNode::~RegExpNode() = default;
+
+RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
+ Trace* trace) {
+ // If we are generating a greedy loop then don't stop and don't reuse code.
+ if (trace->stop_node() != nullptr) {
+ return CONTINUE;
+ }
+
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ if (trace->is_trivial()) {
+ if (label_.is_bound() || on_work_list() || !KeepRecursing(compiler)) {
+ // If a generic version is already scheduled to be generated or we have
+ // recursed too deeply then just generate a jump to that code.
+ macro_assembler->GoTo(&label_);
+ // This will queue it up for generation of a generic version if it hasn't
+ // already been queued.
+ compiler->AddWork(this);
+ return DONE;
+ }
+ // Generate generic version of the node and bind the label for later use.
+ macro_assembler->Bind(&label_);
+ return CONTINUE;
+ }
+
+ // We are being asked to make a non-generic version. Keep track of how many
+ // non-generic versions we generate so as not to overdo it.
+ trace_count_++;
+ if (KeepRecursing(compiler) && compiler->optimize() &&
+ trace_count_ < kMaxCopiesCodeGenerated) {
+ return CONTINUE;
+ }
+
+ // If we get here code has been generated for this node too many times or
+ // recursion is too deep. Time to switch to a generic version. The code for
+ // generic versions above can handle deep recursion properly.
+ bool was_limiting = compiler->limiting_recursion();
+ compiler->set_limiting_recursion(true);
+ trace->Flush(compiler, this);
+ compiler->set_limiting_recursion(was_limiting);
+ return DONE;
+}
+
+bool RegExpNode::KeepRecursing(RegExpCompiler* compiler) {
+ return !compiler->limiting_recursion() &&
+ compiler->recursion_depth() <= RegExpCompiler::kMaxRecursion;
+}
+
+int ActionNode::EatsAtLeast(int still_to_find, int budget, bool not_at_start) {
+ if (budget <= 0) return 0;
+ if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
+ return on_success()->EatsAtLeast(still_to_find, budget - 1, not_at_start);
+}
+
+void ActionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) {
+ // Anything may follow a positive submatch success, thus we need to accept
+ // all characters from this position onwards.
+ bm->SetRest(offset);
+ } else {
+ on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
+ }
+ SaveBMInfo(bm, not_at_start, offset);
+}
+
+int AssertionNode::EatsAtLeast(int still_to_find, int budget,
+ bool not_at_start) {
+ if (budget <= 0) return 0;
+ // If we know we are not at the start and we are asked "how many characters
+ // will you match if you succeed?" then we can answer anything since false
+  // implies false. So let's just return the max answer (still_to_find) since
+ // that won't prevent us from preloading a lot of characters for the other
+ // branches in the node graph.
+ if (assertion_type() == AT_START && not_at_start) return still_to_find;
+ return on_success()->EatsAtLeast(still_to_find, budget - 1, not_at_start);
+}
+
+void AssertionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ // Match the behaviour of EatsAtLeast on this node.
+ if (assertion_type() == AT_START && not_at_start) return;
+ on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
+ SaveBMInfo(bm, not_at_start, offset);
+}
+
+int BackReferenceNode::EatsAtLeast(int still_to_find, int budget,
+ bool not_at_start) {
+ if (read_backward()) return 0;
+ if (budget <= 0) return 0;
+ return on_success()->EatsAtLeast(still_to_find, budget - 1, not_at_start);
+}
+
+int TextNode::EatsAtLeast(int still_to_find, int budget, bool not_at_start) {
+ if (read_backward()) return 0;
+ int answer = Length();
+ if (answer >= still_to_find) return answer;
+ if (budget <= 0) return answer;
+ // We are not at start after this node so we set the last argument to 'true'.
+ return answer +
+ on_success()->EatsAtLeast(still_to_find - answer, budget - 1, true);
+}
+
+int NegativeLookaroundChoiceNode::EatsAtLeast(int still_to_find, int budget,
+ bool not_at_start) {
+ if (budget <= 0) return 0;
+ // Alternative 0 is the negative lookahead, alternative 1 is what comes
+ // afterwards.
+ RegExpNode* node = alternatives_->at(1).node();
+ return node->EatsAtLeast(still_to_find, budget - 1, not_at_start);
+}
+
+void NegativeLookaroundChoiceNode::GetQuickCheckDetails(
+ QuickCheckDetails* details, RegExpCompiler* compiler, int filled_in,
+ bool not_at_start) {
+ // Alternative 0 is the negative lookahead, alternative 1 is what comes
+ // afterwards.
+ RegExpNode* node = alternatives_->at(1).node();
+ return node->GetQuickCheckDetails(details, compiler, filled_in, not_at_start);
+}
+
+int ChoiceNode::EatsAtLeastHelper(int still_to_find, int budget,
+ RegExpNode* ignore_this_node,
+ bool not_at_start) {
+ if (budget <= 0) return 0;
+ int min = 100;
+ int choice_count = alternatives_->length();
+ budget = (budget - 1) / choice_count;
+ for (int i = 0; i < choice_count; i++) {
+ RegExpNode* node = alternatives_->at(i).node();
+ if (node == ignore_this_node) continue;
+ int node_eats_at_least =
+ node->EatsAtLeast(still_to_find, budget, not_at_start);
+ if (node_eats_at_least < min) min = node_eats_at_least;
+ if (min == 0) return 0;
+ }
+ return min;
+}
+
+int LoopChoiceNode::EatsAtLeast(int still_to_find, int budget,
+ bool not_at_start) {
+ return EatsAtLeastHelper(still_to_find, budget - 1, loop_node_, not_at_start);
+}
+
+int ChoiceNode::EatsAtLeast(int still_to_find, int budget, bool not_at_start) {
+ return EatsAtLeastHelper(still_to_find, budget, nullptr, not_at_start);
+}
+
+// Takes the left-most 1-bit and smears it out, setting all bits to its right.
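+// E.g. SmearBitsRight(0x50) == 0x7F and SmearBitsRight(0x01) == 0x01.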
+static inline uint32_t SmearBitsRight(uint32_t v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return v;
+}
+
+bool QuickCheckDetails::Rationalize(bool asc) {
+ bool found_useful_op = false;
+ uint32_t char_mask;
+ if (asc) {
+ char_mask = String::kMaxOneByteCharCode;
+ } else {
+ char_mask = String::kMaxUtf16CodeUnit;
+ }
+ mask_ = 0;
+ value_ = 0;
+ int char_shift = 0;
+ for (int i = 0; i < characters_; i++) {
+ Position* pos = &positions_[i];
+ if ((pos->mask & String::kMaxOneByteCharCode) != 0) {
+ found_useful_op = true;
+ }
+ mask_ |= (pos->mask & char_mask) << char_shift;
+ value_ |= (pos->value & char_mask) << char_shift;
+ char_shift += asc ? 8 : 16;
+ }
+ return found_useful_op;
+}
+
+bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
+ Trace* bounds_check_trace, Trace* trace,
+ bool preload_has_checked_bounds,
+ Label* on_possible_success,
+ QuickCheckDetails* details,
+ bool fall_through_on_failure) {
+ if (details->characters() == 0) return false;
+ GetQuickCheckDetails(details, compiler, 0,
+ trace->at_start() == Trace::FALSE_VALUE);
+ if (details->cannot_match()) return false;
+ if (!details->Rationalize(compiler->one_byte())) return false;
+ DCHECK(details->characters() == 1 ||
+ compiler->macro_assembler()->CanReadUnaligned());
+ uint32_t mask = details->mask();
+ uint32_t value = details->value();
+
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+
+ if (trace->characters_preloaded() != details->characters()) {
+ DCHECK(trace->cp_offset() == bounds_check_trace->cp_offset());
+ // We are attempting to preload the minimum number of characters
+ // any choice would eat, so if the bounds check fails, then none of the
+ // choices can succeed, so we can just immediately backtrack, rather
+ // than go to the next choice.
+ assembler->LoadCurrentCharacter(
+ trace->cp_offset(), bounds_check_trace->backtrack(),
+ !preload_has_checked_bounds, details->characters());
+ }
+
+ bool need_mask = true;
+
+ if (details->characters() == 1) {
+    // If the number of characters preloaded is 1 then we used a byte or 16 bit
+ // load so the value is already masked down.
+ uint32_t char_mask;
+ if (compiler->one_byte()) {
+ char_mask = String::kMaxOneByteCharCode;
+ } else {
+ char_mask = String::kMaxUtf16CodeUnit;
+ }
+ if ((mask & char_mask) == char_mask) need_mask = false;
+ mask &= char_mask;
+ } else {
+ // For 2-character preloads in one-byte mode or 1-character preloads in
+ // two-byte mode we also use a 16 bit load with zero extend.
+ static const uint32_t kTwoByteMask = 0xFFFF;
+ static const uint32_t kFourByteMask = 0xFFFFFFFF;
+ if (details->characters() == 2 && compiler->one_byte()) {
+ if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
+ } else if (details->characters() == 1 && !compiler->one_byte()) {
+ if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
+ } else {
+ if (mask == kFourByteMask) need_mask = false;
+ }
+ }
+
+ if (fall_through_on_failure) {
+ if (need_mask) {
+ assembler->CheckCharacterAfterAnd(value, mask, on_possible_success);
+ } else {
+ assembler->CheckCharacter(value, on_possible_success);
+ }
+ } else {
+ if (need_mask) {
+ assembler->CheckNotCharacterAfterAnd(value, mask, trace->backtrack());
+ } else {
+ assembler->CheckNotCharacter(value, trace->backtrack());
+ }
+ }
+ return true;
+}
+
+// Here is the meat of GetQuickCheckDetails (see also the comment on the
+// super-class in the .h file).
+//
+// We iterate along the text object, building up for each character a
+// mask and value that can be used to test for a quick failure to match.
+// The masks and values for the positions will be combined into a single
+// machine word for the current character width in order to be used in
+// generating a quick check.
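+//
+// E.g. for the one-byte pattern /foo/ the three positions combine into
+// mask == 0x00FFFFFF and value == 0x006F6F66 ('f' | 'o' << 8 | 'o' << 16),
+// so a single load, mask and compare can reject a non-match early.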
+void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler,
+ int characters_filled_in,
+ bool not_at_start) {
+  // Do not collect any quick check details if the text node reads backward,
+  // since it reads in the opposite direction from the one used for quick
+  // checks.
+ if (read_backward()) return;
+ Isolate* isolate = compiler->macro_assembler()->isolate();
+ DCHECK(characters_filled_in < details->characters());
+ int characters = details->characters();
+ int char_mask;
+ if (compiler->one_byte()) {
+ char_mask = String::kMaxOneByteCharCode;
+ } else {
+ char_mask = String::kMaxUtf16CodeUnit;
+ }
+ for (int k = 0; k < elements()->length(); k++) {
+ TextElement elm = elements()->at(k);
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
+ for (int i = 0; i < characters && i < quarks.length(); i++) {
+ QuickCheckDetails::Position* pos =
+ details->positions(characters_filled_in);
+ uc16 c = quarks[i];
+ if (elm.atom()->ignore_case()) {
+ unibrow::uchar chars[4];
+ int length = GetCaseIndependentLetters(
+ isolate, c, compiler->one_byte(), chars, 4);
+ if (length == 0) {
+ // This can happen because all case variants are non-Latin1, but we
+ // know the input is Latin1.
+ details->set_cannot_match();
+ pos->determines_perfectly = false;
+ return;
+ }
+ if (length == 1) {
+ // This letter has no case equivalents, so it's nice and simple
+ // and the mask-compare will determine definitely whether we have
+ // a match at this character position.
+ pos->mask = char_mask;
+ pos->value = c;
+ pos->determines_perfectly = true;
+ } else {
+ uint32_t common_bits = char_mask;
+ uint32_t bits = chars[0];
+ for (int j = 1; j < length; j++) {
+ uint32_t differing_bits = ((chars[j] & common_bits) ^ bits);
+ common_bits ^= differing_bits;
+ bits &= common_bits;
+ }
+ // If length is 2 and common bits has only one zero in it then
+ // our mask and compare instruction will determine definitely
+ // whether we have a match at this character position. Otherwise
+ // it can only be an approximate check.
+ uint32_t one_zero = (common_bits | ~char_mask);
+ if (length == 2 && ((~one_zero) & ((~one_zero) - 1)) == 0) {
+ pos->determines_perfectly = true;
+ }
+ pos->mask = common_bits;
+ pos->value = bits;
+ }
+ } else {
+ // Don't ignore case. Nice simple case where the mask-compare will
+ // determine definitely whether we have a match at this character
+ // position.
+ if (c > char_mask) {
+ details->set_cannot_match();
+ pos->determines_perfectly = false;
+ return;
+ }
+ pos->mask = char_mask;
+ pos->value = c;
+ pos->determines_perfectly = true;
+ }
+ characters_filled_in++;
+ DCHECK(characters_filled_in <= details->characters());
+ if (characters_filled_in == details->characters()) {
+ return;
+ }
+ }
+ } else {
+ QuickCheckDetails::Position* pos =
+ details->positions(characters_filled_in);
+ RegExpCharacterClass* tree = elm.char_class();
+ ZoneList<CharacterRange>* ranges = tree->ranges(zone());
+ DCHECK(!ranges->is_empty());
+ if (tree->is_negated()) {
+ // A quick check uses multi-character mask and compare. There is no
+ // useful way to incorporate a negative char class into this scheme
+ // so we just conservatively create a mask and value that will always
+ // succeed.
+ pos->mask = 0;
+ pos->value = 0;
+ } else {
+ int first_range = 0;
+ while (ranges->at(first_range).from() > char_mask) {
+ first_range++;
+ if (first_range == ranges->length()) {
+ details->set_cannot_match();
+ pos->determines_perfectly = false;
+ return;
+ }
+ }
+ CharacterRange range = ranges->at(first_range);
+ uc16 from = range.from();
+ uc16 to = range.to();
+ if (to > char_mask) {
+ to = char_mask;
+ }
+ uint32_t differing_bits = (from ^ to);
+ // A mask and compare is only perfect if the differing bits form a
+ // number like 00011111 with one single block of trailing 1s.
+ if ((differing_bits & (differing_bits + 1)) == 0 &&
+ from + differing_bits == to) {
+ pos->determines_perfectly = true;
+ }
+ uint32_t common_bits = ~SmearBitsRight(differing_bits);
+ uint32_t bits = (from & common_bits);
+ for (int i = first_range + 1; i < ranges->length(); i++) {
+ CharacterRange range = ranges->at(i);
+ uc16 from = range.from();
+ uc16 to = range.to();
+ if (from > char_mask) continue;
+ if (to > char_mask) to = char_mask;
+ // Here we are combining more ranges into the mask and compare
+ // value. With each new range the mask becomes more sparse and
+ // so the chances of a false positive rise. A character class
+ // with multiple ranges is assumed never to be equivalent to a
+ // mask and compare operation.
+ pos->determines_perfectly = false;
+ uint32_t new_common_bits = (from ^ to);
+ new_common_bits = ~SmearBitsRight(new_common_bits);
+ common_bits &= new_common_bits;
+ bits &= new_common_bits;
+ uint32_t differing_bits = (from & common_bits) ^ bits;
+ common_bits ^= differing_bits;
+ bits &= common_bits;
+ }
+ pos->mask = common_bits;
+ pos->value = bits;
+ }
+ characters_filled_in++;
+ DCHECK(characters_filled_in <= details->characters());
+ if (characters_filled_in == details->characters()) {
+ return;
+ }
+ }
+ }
+ DCHECK(characters_filled_in != details->characters());
+ if (!details->cannot_match()) {
+ on_success()->GetQuickCheckDetails(details, compiler, characters_filled_in,
+ true);
+ }
+}
+
+void QuickCheckDetails::Clear() {
+ for (int i = 0; i < characters_; i++) {
+ positions_[i].mask = 0;
+ positions_[i].value = 0;
+ positions_[i].determines_perfectly = false;
+ }
+ characters_ = 0;
+}
+
+void QuickCheckDetails::Advance(int by, bool one_byte) {
+ if (by >= characters_ || by < 0) {
+ DCHECK_IMPLIES(by < 0, characters_ == 0);
+ Clear();
+ return;
+ }
+ DCHECK_LE(characters_ - by, 4);
+ DCHECK_LE(characters_, 4);
+ for (int i = 0; i < characters_ - by; i++) {
+ positions_[i] = positions_[by + i];
+ }
+ for (int i = characters_ - by; i < characters_; i++) {
+ positions_[i].mask = 0;
+ positions_[i].value = 0;
+ positions_[i].determines_perfectly = false;
+ }
+ characters_ -= by;
+  // We could also shift mask_ and value_ here, but they are only consulted in
+  // a check that has already been emitted by the time we advance, and they
+  // won't be used again, so updating them would gain us nothing.
+}
+
+void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) {
+ DCHECK(characters_ == other->characters_);
+ if (other->cannot_match_) {
+ return;
+ }
+ if (cannot_match_) {
+ *this = *other;
+ return;
+ }
+ for (int i = from_index; i < characters_; i++) {
+ QuickCheckDetails::Position* pos = positions(i);
+ QuickCheckDetails::Position* other_pos = other->positions(i);
+ if (pos->mask != other_pos->mask || pos->value != other_pos->value ||
+ !other_pos->determines_perfectly) {
+ // Our mask-compare operation will be approximate unless we have the
+ // exact same operation on both sides of the alternation.
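+      // E.g. for a one-byte subject, merging the details for 'a' (0x61) and
+      // 'b' (0x62) yields mask 0xFC and value 0x60, which also lets 0x60
+      // ('`') and 0x63 ('c') pass the quick check.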
+ pos->determines_perfectly = false;
+ }
+ pos->mask &= other_pos->mask;
+ pos->value &= pos->mask;
+ other_pos->value &= pos->mask;
+ uc16 differing_bits = (pos->value ^ other_pos->value);
+ pos->mask &= ~differing_bits;
+ pos->value &= pos->mask;
+ }
+}
+
+class VisitMarker {
+ public:
+ explicit VisitMarker(NodeInfo* info) : info_(info) {
+ DCHECK(!info->visited);
+ info->visited = true;
+ }
+ ~VisitMarker() { info_->visited = false; }
+
+ private:
+ NodeInfo* info_;
+};
+
+RegExpNode* SeqRegExpNode::FilterOneByte(int depth) {
+ if (info()->replacement_calculated) return replacement();
+ if (depth < 0) return this;
+ DCHECK(!info()->visited);
+ VisitMarker marker(info());
+ return FilterSuccessor(depth - 1);
+}
+
+RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
+ RegExpNode* next = on_success_->FilterOneByte(depth - 1);
+ if (next == nullptr) return set_replacement(nullptr);
+ on_success_ = next;
+ return set_replacement(this);
+}
+
+// We need to check for the following characters: 0x39C 0x3BC 0x178.
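+// These code points have Latin1 case equivalents: U+039C and U+03BC (Greek
+// mu) case-map together with U+00B5 MICRO SIGN, and U+0178 is the uppercase
+// of U+00FF (y with diaeresis).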
+bool RangeContainsLatin1Equivalents(CharacterRange range) {
+ // TODO(dcarney): this could be a lot more efficient.
+ return range.Contains(0x039C) || range.Contains(0x03BC) ||
+ range.Contains(0x0178);
+}
+
+static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
+ for (int i = 0; i < ranges->length(); i++) {
+ // TODO(dcarney): this could be a lot more efficient.
+ if (RangeContainsLatin1Equivalents(ranges->at(i))) return true;
+ }
+ return false;
+}
+
+RegExpNode* TextNode::FilterOneByte(int depth) {
+ if (info()->replacement_calculated) return replacement();
+ if (depth < 0) return this;
+ DCHECK(!info()->visited);
+ VisitMarker marker(info());
+ int element_count = elements()->length();
+ for (int i = 0; i < element_count; i++) {
+ TextElement elm = elements()->at(i);
+ if (elm.text_type() == TextElement::ATOM) {
+ Vector<const uc16> quarks = elm.atom()->data();
+ for (int j = 0; j < quarks.length(); j++) {
+ uint16_t c = quarks[j];
+ if (elm.atom()->ignore_case()) {
+ c = unibrow::Latin1::TryConvertToLatin1(c);
+ }
+ if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
+ // Replace quark in case we converted to Latin-1.
+ uint16_t* writable_quarks = const_cast<uint16_t*>(quarks.begin());
+ writable_quarks[j] = c;
+ }
+ } else {
+ DCHECK(elm.text_type() == TextElement::CHAR_CLASS);
+ RegExpCharacterClass* cc = elm.char_class();
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone());
+ CharacterRange::Canonicalize(ranges);
+ // Now they are in order so we only need to look at the first.
+ int range_count = ranges->length();
+ if (cc->is_negated()) {
+ if (range_count != 0 && ranges->at(0).from() == 0 &&
+ ranges->at(0).to() >= String::kMaxOneByteCharCode) {
+ // This will be handled in a later filter.
+ if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
+ continue;
+ return set_replacement(nullptr);
+ }
+ } else {
+ if (range_count == 0 ||
+ ranges->at(0).from() > String::kMaxOneByteCharCode) {
+ // This will be handled in a later filter.
+ if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
+ continue;
+ return set_replacement(nullptr);
+ }
+ }
+ }
+ }
+ return FilterSuccessor(depth - 1);
+}
+
+RegExpNode* LoopChoiceNode::FilterOneByte(int depth) {
+ if (info()->replacement_calculated) return replacement();
+ if (depth < 0) return this;
+ if (info()->visited) return this;
+ {
+ VisitMarker marker(info());
+
+ RegExpNode* continue_replacement = continue_node_->FilterOneByte(depth - 1);
+ // If we can't continue after the loop then there is no sense in doing the
+ // loop.
+ if (continue_replacement == nullptr) return set_replacement(nullptr);
+ }
+
+ return ChoiceNode::FilterOneByte(depth - 1);
+}
+
+RegExpNode* ChoiceNode::FilterOneByte(int depth) {
+ if (info()->replacement_calculated) return replacement();
+ if (depth < 0) return this;
+ if (info()->visited) return this;
+ VisitMarker marker(info());
+ int choice_count = alternatives_->length();
+
+ for (int i = 0; i < choice_count; i++) {
+ GuardedAlternative alternative = alternatives_->at(i);
+ if (alternative.guards() != nullptr &&
+ alternative.guards()->length() != 0) {
+ set_replacement(this);
+ return this;
+ }
+ }
+
+ int surviving = 0;
+ RegExpNode* survivor = nullptr;
+ for (int i = 0; i < choice_count; i++) {
+ GuardedAlternative alternative = alternatives_->at(i);
+ RegExpNode* replacement = alternative.node()->FilterOneByte(depth - 1);
+ DCHECK(replacement != this); // No missing EMPTY_MATCH_CHECK.
+ if (replacement != nullptr) {
+ alternatives_->at(i).set_node(replacement);
+ surviving++;
+ survivor = replacement;
+ }
+ }
+ if (surviving < 2) return set_replacement(survivor);
+
+ set_replacement(this);
+ if (surviving == choice_count) {
+ return this;
+ }
+ // Only some of the nodes survived the filtering. We need to rebuild the
+ // alternatives list.
+ ZoneList<GuardedAlternative>* new_alternatives =
+ new (zone()) ZoneList<GuardedAlternative>(surviving, zone());
+ for (int i = 0; i < choice_count; i++) {
+ RegExpNode* replacement =
+ alternatives_->at(i).node()->FilterOneByte(depth - 1);
+ if (replacement != nullptr) {
+ alternatives_->at(i).set_node(replacement);
+ new_alternatives->Add(alternatives_->at(i), zone());
+ }
+ }
+ alternatives_ = new_alternatives;
+ return this;
+}
+
+RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth) {
+ if (info()->replacement_calculated) return replacement();
+ if (depth < 0) return this;
+ if (info()->visited) return this;
+ VisitMarker marker(info());
+ // Alternative 0 is the negative lookahead, alternative 1 is what comes
+ // afterwards.
+ RegExpNode* node = alternatives_->at(1).node();
+ RegExpNode* replacement = node->FilterOneByte(depth - 1);
+ if (replacement == nullptr) return set_replacement(nullptr);
+ alternatives_->at(1).set_node(replacement);
+
+ RegExpNode* neg_node = alternatives_->at(0).node();
+ RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1);
+ // If the negative lookahead is always going to fail then
+ // we don't need to check it.
+ if (neg_replacement == nullptr) return set_replacement(replacement);
+ alternatives_->at(0).set_node(neg_replacement);
+ return set_replacement(this);
+}
+
+void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler,
+ int characters_filled_in,
+ bool not_at_start) {
+ if (body_can_be_zero_length_ || info()->visited) return;
+ VisitMarker marker(info());
+ return ChoiceNode::GetQuickCheckDetails(details, compiler,
+ characters_filled_in, not_at_start);
+}
+
+void LoopChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ if (body_can_be_zero_length_ || budget <= 0) {
+ bm->SetRest(offset);
+ SaveBMInfo(bm, not_at_start, offset);
+ return;
+ }
+ ChoiceNode::FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
+ SaveBMInfo(bm, not_at_start, offset);
+}
+
+void ChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler,
+ int characters_filled_in,
+ bool not_at_start) {
+ not_at_start = (not_at_start || not_at_start_);
+ int choice_count = alternatives_->length();
+ DCHECK_LT(0, choice_count);
+ alternatives_->at(0).node()->GetQuickCheckDetails(
+ details, compiler, characters_filled_in, not_at_start);
+ for (int i = 1; i < choice_count; i++) {
+ QuickCheckDetails new_details(details->characters());
+ RegExpNode* node = alternatives_->at(i).node();
+ node->GetQuickCheckDetails(&new_details, compiler, characters_filled_in,
+ not_at_start);
+ // Here we merge the quick match details of the two branches.
+ details->Merge(&new_details, characters_filled_in);
+ }
+}
+
+// Check for [0-9A-Z_a-z].
+static void EmitWordCheck(RegExpMacroAssembler* assembler, Label* word,
+ Label* non_word, bool fall_through_on_word) {
+ if (assembler->CheckSpecialCharacterClass(
+ fall_through_on_word ? 'w' : 'W',
+ fall_through_on_word ? non_word : word)) {
+ // Optimized implementation available.
+ return;
+ }
+ assembler->CheckCharacterGT('z', non_word);
+ assembler->CheckCharacterLT('0', non_word);
+ assembler->CheckCharacterGT('a' - 1, word);
+ assembler->CheckCharacterLT('9' + 1, word);
+ assembler->CheckCharacterLT('A', non_word);
+ assembler->CheckCharacterLT('Z' + 1, word);
+ if (fall_through_on_word) {
+ assembler->CheckNotCharacter('_', non_word);
+ } else {
+ assembler->CheckCharacter('_', word);
+ }
+}
+
+// Emit the code to check for a ^ in multiline mode (1-character lookbehind
+// that matches newline or the start of input).
+static void EmitHat(RegExpCompiler* compiler, RegExpNode* on_success,
+ Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ // We will be loading the previous character into the current character
+ // register.
+ Trace new_trace(*trace);
+ new_trace.InvalidateCurrentCharacter();
+
+ Label ok;
+ if (new_trace.cp_offset() == 0) {
+ // The start of input counts as a newline in this context, so skip to
+ // ok if we are at the start.
+ assembler->CheckAtStart(&ok);
+ }
+ // We already checked that we are not at the start of input so it must be
+ // OK to load the previous character.
+ assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1,
+ new_trace.backtrack(), false);
+ if (!assembler->CheckSpecialCharacterClass('n', new_trace.backtrack())) {
+ // Newline means \n, \r, 0x2028 or 0x2029.
+ if (!compiler->one_byte()) {
+ assembler->CheckCharacterAfterAnd(0x2028, 0xFFFE, &ok);
+ }
+ assembler->CheckCharacter('\n', &ok);
+ assembler->CheckNotCharacter('\r', new_trace.backtrack());
+ }
+ assembler->Bind(&ok);
+ on_success->Emit(compiler, &new_trace);
+}
+
+// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
+void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ Isolate* isolate = assembler->isolate();
+ Trace::TriBool next_is_word_character = Trace::UNKNOWN;
+ bool not_at_start = (trace->at_start() == Trace::FALSE_VALUE);
+ BoyerMooreLookahead* lookahead = bm_info(not_at_start);
+ if (lookahead == nullptr) {
+ int eats_at_least = Min(kMaxLookaheadForBoyerMoore,
+ EatsAtLeast(kMaxLookaheadForBoyerMoore,
+ kRecursionBudget, not_at_start));
+ if (eats_at_least >= 1) {
+ BoyerMooreLookahead* bm =
+ new (zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
+ FillInBMInfo(isolate, 0, kRecursionBudget, bm, not_at_start);
+ if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE_VALUE;
+ if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE_VALUE;
+ }
+ } else {
+ if (lookahead->at(0)->is_non_word())
+ next_is_word_character = Trace::FALSE_VALUE;
+ if (lookahead->at(0)->is_word()) next_is_word_character = Trace::TRUE_VALUE;
+ }
+ bool at_boundary = (assertion_type_ == AssertionNode::AT_BOUNDARY);
+ if (next_is_word_character == Trace::UNKNOWN) {
+ Label before_non_word;
+ Label before_word;
+ if (trace->characters_preloaded() != 1) {
+ assembler->LoadCurrentCharacter(trace->cp_offset(), &before_non_word);
+ }
+ // Fall through on non-word.
+ EmitWordCheck(assembler, &before_word, &before_non_word, false);
+ // Next character is not a word character.
+ assembler->Bind(&before_non_word);
+ Label ok;
+ BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
+ assembler->GoTo(&ok);
+
+ assembler->Bind(&before_word);
+ BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
+ assembler->Bind(&ok);
+ } else if (next_is_word_character == Trace::TRUE_VALUE) {
+ BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
+ } else {
+ DCHECK(next_is_word_character == Trace::FALSE_VALUE);
+ BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
+ }
+}
+
+void AssertionNode::BacktrackIfPrevious(
+ RegExpCompiler* compiler, Trace* trace,
+ AssertionNode::IfPrevious backtrack_if_previous) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ Trace new_trace(*trace);
+ new_trace.InvalidateCurrentCharacter();
+
+ Label fall_through, dummy;
+
+ Label* non_word = backtrack_if_previous == kIsNonWord ? new_trace.backtrack()
+ : &fall_through;
+ Label* word = backtrack_if_previous == kIsNonWord ? &fall_through
+ : new_trace.backtrack();
+
+ if (new_trace.cp_offset() == 0) {
+ // The start of input counts as a non-word character, so the question is
+ // decided if we are at the start.
+ assembler->CheckAtStart(non_word);
+ }
+ // We already checked that we are not at the start of input so it must be
+ // OK to load the previous character.
+ assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1, &dummy, false);
+ EmitWordCheck(assembler, word, non_word, backtrack_if_previous == kIsNonWord);
+
+ assembler->Bind(&fall_through);
+ on_success()->Emit(compiler, &new_trace);
+}
+
+void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler,
+ int filled_in, bool not_at_start) {
+ if (assertion_type_ == AT_START && not_at_start) {
+ details->set_cannot_match();
+ return;
+ }
+ return on_success()->GetQuickCheckDetails(details, compiler, filled_in,
+ not_at_start);
+}
+
+void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ switch (assertion_type_) {
+ case AT_END: {
+ Label ok;
+ assembler->CheckPosition(trace->cp_offset(), &ok);
+ assembler->GoTo(trace->backtrack());
+ assembler->Bind(&ok);
+ break;
+ }
+ case AT_START: {
+ if (trace->at_start() == Trace::FALSE_VALUE) {
+ assembler->GoTo(trace->backtrack());
+ return;
+ }
+ if (trace->at_start() == Trace::UNKNOWN) {
+ assembler->CheckNotAtStart(trace->cp_offset(), trace->backtrack());
+ Trace at_start_trace = *trace;
+ at_start_trace.set_at_start(Trace::TRUE_VALUE);
+ on_success()->Emit(compiler, &at_start_trace);
+ return;
+ }
+ } break;
+ case AFTER_NEWLINE:
+ EmitHat(compiler, on_success(), trace);
+ return;
+ case AT_BOUNDARY:
+ case AT_NON_BOUNDARY: {
+ EmitBoundaryCheck(compiler, trace);
+ return;
+ }
+ }
+ on_success()->Emit(compiler, trace);
+}
+
+static bool DeterminedAlready(QuickCheckDetails* quick_check, int offset) {
+ if (quick_check == nullptr) return false;
+ if (offset >= quick_check->characters()) return false;
+ return quick_check->positions(offset)->determines_perfectly;
+}
+
+static void UpdateBoundsCheck(int index, int* checked_up_to) {
+ if (index > *checked_up_to) {
+ *checked_up_to = index;
+ }
+}
+
+// We call this repeatedly to generate code for each pass over the text node.
+// The passes are in increasing order of difficulty because we hope one
+// of the first passes will fail in which case we are saved the work of the
+// later passes. For example, for the case independent regexp /%[asdfghjkl]a/
+// we will check the '%' in the first pass, the case independent 'a' in the
+// second pass and the character class in the last pass.
+//
+// The passes are done from right to left, so for example to test for /bar/
+// we will first test for an 'r' with offset 2, then an 'a' with offset 1
+// and then a 'b' with offset 0. This means we can avoid the end-of-input
+// bounds check most of the time. In the example we only need to check for
+// end-of-input when loading the putative 'r'.
+//
+// A slight complication involves the fact that the first character may already
+// be fetched into a register by the previous node. In this case we want to
+// do the test for that character first. We do this in separate passes. The
+// 'preloaded' argument indicates that we are doing such a 'pass'. If such a
+// pass has been performed then subsequent passes will have true in
+// first_element_checked to indicate that that character does not need to be
+// checked again.
+//
+// In addition to all this we are passed a Trace, which can
+// contain an AlternativeGeneration object. In this AlternativeGeneration
+// object we can see details of any quick check that was already passed in
+// order to get to the code we are now generating. The quick check can involve
+// loading characters, which means we do not need to recheck the bounds
+// up to the limit the quick check already checked. In addition the quick
+// check can have involved a mask and compare operation which may simplify
+// or obviate the need for further checks at some character positions.
+void TextNode::TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
+ bool preloaded, Trace* trace,
+ bool first_element_checked, int* checked_up_to) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ Isolate* isolate = assembler->isolate();
+ bool one_byte = compiler->one_byte();
+ Label* backtrack = trace->backtrack();
+ QuickCheckDetails* quick_check = trace->quick_check_performed();
+ int element_count = elements()->length();
+ int backward_offset = read_backward() ? -Length() : 0;
+ for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
+ TextElement elm = elements()->at(i);
+ int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
+ if (elm.text_type() == TextElement::ATOM) {
+ if (SkipPass(pass, elm.atom()->ignore_case())) continue;
+ Vector<const uc16> quarks = elm.atom()->data();
+ for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
+ if (first_element_checked && i == 0 && j == 0) continue;
+ if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
+ EmitCharacterFunction* emit_function = nullptr;
+ uc16 quark = quarks[j];
+ if (elm.atom()->ignore_case()) {
+ // Everywhere else we assume that a non-Latin-1 character cannot match
+          // a Latin-1 character. Avoid the cases where this assumption is
+ // invalid by using the Latin1 equivalent instead.
+ quark = unibrow::Latin1::TryConvertToLatin1(quark);
+ }
+ switch (pass) {
+ case NON_LATIN1_MATCH:
+ DCHECK(one_byte);
+ if (quark > String::kMaxOneByteCharCode) {
+ assembler->GoTo(backtrack);
+ return;
+ }
+ break;
+ case NON_LETTER_CHARACTER_MATCH:
+ emit_function = &EmitAtomNonLetter;
+ break;
+ case SIMPLE_CHARACTER_MATCH:
+ emit_function = &EmitSimpleCharacter;
+ break;
+ case CASE_CHARACTER_MATCH:
+ emit_function = &EmitAtomLetter;
+ break;
+ default:
+ break;
+ }
+ if (emit_function != nullptr) {
+ bool bounds_check = *checked_up_to < cp_offset + j || read_backward();
+ bool bound_checked =
+ emit_function(isolate, compiler, quark, backtrack, cp_offset + j,
+ bounds_check, preloaded);
+ if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
+ }
+ }
+ } else {
+ DCHECK_EQ(TextElement::CHAR_CLASS, elm.text_type());
+ if (pass == CHARACTER_CLASS_MATCH) {
+ if (first_element_checked && i == 0) continue;
+ if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
+ RegExpCharacterClass* cc = elm.char_class();
+ bool bounds_check = *checked_up_to < cp_offset || read_backward();
+ EmitCharClass(assembler, cc, one_byte, backtrack, cp_offset,
+ bounds_check, preloaded, zone());
+ UpdateBoundsCheck(cp_offset, checked_up_to);
+ }
+ }
+ }
+}
+
+int TextNode::Length() {
+ TextElement elm = elements()->last();
+ DCHECK_LE(0, elm.cp_offset());
+ return elm.cp_offset() + elm.length();
+}
+
+bool TextNode::SkipPass(TextEmitPassType pass, bool ignore_case) {
+ if (ignore_case) {
+ return pass == SIMPLE_CHARACTER_MATCH;
+ } else {
+ return pass == NON_LETTER_CHARACTER_MATCH || pass == CASE_CHARACTER_MATCH;
+ }
+}
+
+TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
+ ZoneList<CharacterRange>* ranges,
+ bool read_backward,
+ RegExpNode* on_success,
+ JSRegExp::Flags flags) {
+ DCHECK_NOT_NULL(ranges);
+ ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(1, zone);
+ elms->Add(TextElement::CharClass(
+ new (zone) RegExpCharacterClass(zone, ranges, flags)),
+ zone);
+ return new (zone) TextNode(elms, read_backward, on_success);
+}
+
+TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
+ CharacterRange trail,
+ bool read_backward,
+ RegExpNode* on_success,
+ JSRegExp::Flags flags) {
+ ZoneList<CharacterRange>* lead_ranges = CharacterRange::List(zone, lead);
+ ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
+ ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(2, zone);
+ elms->Add(TextElement::CharClass(
+ new (zone) RegExpCharacterClass(zone, lead_ranges, flags)),
+ zone);
+ elms->Add(TextElement::CharClass(
+ new (zone) RegExpCharacterClass(zone, trail_ranges, flags)),
+ zone);
+ return new (zone) TextNode(elms, read_backward, on_success);
+}
+
+// This generates the code to match a text node. A text node can contain
+// straight character sequences (possibly to be matched in a case-independent
+// way) and character classes. For efficiency we do not do this in a single
+// pass from left to right. Instead we pass over the text node several times,
+// emitting code for some character positions every time. See the comment on
+// TextEmitPass for details.
+void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ LimitResult limit_result = LimitVersions(compiler, trace);
+ if (limit_result == DONE) return;
+ DCHECK(limit_result == CONTINUE);
+
+ if (trace->cp_offset() + Length() > RegExpMacroAssembler::kMaxCPOffset) {
+ compiler->SetRegExpTooBig();
+ return;
+ }
+
+ if (compiler->one_byte()) {
+ int dummy = 0;
+ TextEmitPass(compiler, NON_LATIN1_MATCH, false, trace, false, &dummy);
+ }
+
+ bool first_elt_done = false;
+ int bound_checked_to = trace->cp_offset() - 1;
+ bound_checked_to += trace->bound_checked_up_to();
+
+ // If a character is preloaded into the current character register then
+ // check that now.
+ if (trace->characters_preloaded() == 1) {
+ for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
+ TextEmitPass(compiler, static_cast<TextEmitPassType>(pass), true, trace,
+ false, &bound_checked_to);
+ }
+ first_elt_done = true;
+ }
+
+ for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
+ TextEmitPass(compiler, static_cast<TextEmitPassType>(pass), false, trace,
+ first_elt_done, &bound_checked_to);
+ }
+
+ Trace successor_trace(*trace);
+ // If we advance backward, we may end up at the start.
+ successor_trace.AdvanceCurrentPositionInTrace(
+ read_backward() ? -Length() : Length(), compiler);
+ successor_trace.set_at_start(read_backward() ? Trace::UNKNOWN
+ : Trace::FALSE_VALUE);
+ RecursionCheck rc(compiler);
+ on_success()->Emit(compiler, &successor_trace);
+}
+
+void Trace::InvalidateCurrentCharacter() { characters_preloaded_ = 0; }
+
+void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
+ // We don't have an instruction for shifting the current character register
+  // down or for using a shifted value for anything, so let's just forget that
+ // we preloaded any characters into it.
+ characters_preloaded_ = 0;
+ // Adjust the offsets of the quick check performed information. This
+ // information is used to find out what we already determined about the
+ // characters by means of mask and compare.
+ quick_check_performed_.Advance(by, compiler->one_byte());
+ cp_offset_ += by;
+ if (cp_offset_ > RegExpMacroAssembler::kMaxCPOffset) {
+ compiler->SetRegExpTooBig();
+ cp_offset_ = 0;
+ }
+ bound_checked_up_to_ = Max(0, bound_checked_up_to_ - by);
+}
+
+void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
+ int element_count = elements()->length();
+ for (int i = 0; i < element_count; i++) {
+ TextElement elm = elements()->at(i);
+ if (elm.text_type() == TextElement::CHAR_CLASS) {
+ RegExpCharacterClass* cc = elm.char_class();
+#ifdef V8_INTL_SUPPORT
+ bool case_equivalents_already_added =
+ NeedsUnicodeCaseEquivalents(cc->flags());
+#else
+ bool case_equivalents_already_added = false;
+#endif
+ if (IgnoreCase(cc->flags()) && !case_equivalents_already_added) {
+ // None of the standard character classes is different in the case
+ // independent case and it slows us down if we don't know that.
+ if (cc->is_standard(zone())) continue;
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone());
+ CharacterRange::AddCaseEquivalents(isolate, zone(), ranges,
+ is_one_byte);
+ }
+ }
+ }
+}
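+
+// Illustrative example of the transformation above (hypothetical input): for
+// /[a-c]/i on a one-byte subject, [a-c] is not a standard class, so
+// AddCaseEquivalents extends its ranges with 'A'-'C'; when both /i and /u are
+// set (with ICU support) the equivalents are assumed to have been added
+// earlier, so the class is skipped here.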
+
+int TextNode::GreedyLoopTextLength() { return Length(); }
+
+RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
+ RegExpCompiler* compiler) {
+ if (read_backward()) return nullptr;
+ if (elements()->length() != 1) return nullptr;
+ TextElement elm = elements()->at(0);
+ if (elm.text_type() != TextElement::CHAR_CLASS) return nullptr;
+ RegExpCharacterClass* node = elm.char_class();
+ ZoneList<CharacterRange>* ranges = node->ranges(zone());
+ CharacterRange::Canonicalize(ranges);
+ if (node->is_negated()) {
+ return ranges->length() == 0 ? on_success() : nullptr;
+ }
+ if (ranges->length() != 1) return nullptr;
+ uint32_t max_char;
+ if (compiler->one_byte()) {
+ max_char = String::kMaxOneByteCharCode;
+ } else {
+ max_char = String::kMaxUtf16CodeUnit;
+ }
+ return ranges->at(0).IsEverything(max_char) ? on_success() : nullptr;
+}
+
+// Finds the fixed match length of a sequence of nodes that goes from
+// this alternative and back to this choice node. If there are variable
+// length nodes or other complications in the way then return a sentinel
+// value indicating that a greedy loop cannot be constructed.
+int ChoiceNode::GreedyLoopTextLengthForAlternative(
+ GuardedAlternative* alternative) {
+ int length = 0;
+ RegExpNode* node = alternative->node();
+ // Later we will generate code for all these text nodes using recursion
+ // so we have to limit the max number.
+ int recursion_depth = 0;
+ while (node != this) {
+ if (recursion_depth++ > RegExpCompiler::kMaxRecursion) {
+ return kNodeIsTooComplexForGreedyLoops;
+ }
+ int node_length = node->GreedyLoopTextLength();
+ if (node_length == kNodeIsTooComplexForGreedyLoops) {
+ return kNodeIsTooComplexForGreedyLoops;
+ }
+ length += node_length;
+ SeqRegExpNode* seq_node = static_cast<SeqRegExpNode*>(node);
+ node = seq_node->on_success();
+ }
+ return read_backward() ? -length : length;
+}
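+
+// Illustration of the walk above (assumed node shapes, not exhaustive): if the
+// loop body is the single TextNode for "ab", the lengths sum to 2 and 2 is
+// returned (negated when the loop reads backward); any node on the path that
+// cannot report a fixed length makes the whole alternative return
+// kNodeIsTooComplexForGreedyLoops.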
+
+void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) {
+ DCHECK_NULL(loop_node_);
+ AddAlternative(alt);
+ loop_node_ = alt.node();
+}
+
+void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) {
+ DCHECK_NULL(continue_node_);
+ AddAlternative(alt);
+ continue_node_ = alt.node();
+}
+
+void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ if (trace->stop_node() == this) {
+ // Back edge of greedy optimized loop node graph.
+ int text_length =
+ GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
+ DCHECK_NE(kNodeIsTooComplexForGreedyLoops, text_length);
+ // Update the counter-based backtracking info on the stack. This is an
+ // optimization for greedy loops (see below).
+ DCHECK(trace->cp_offset() == text_length);
+ macro_assembler->AdvanceCurrentPosition(text_length);
+ macro_assembler->GoTo(trace->loop_label());
+ return;
+ }
+ DCHECK_NULL(trace->stop_node());
+ if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ return;
+ }
+ ChoiceNode::Emit(compiler, trace);
+}
+
+int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler,
+ int eats_at_least) {
+ int preload_characters = Min(4, eats_at_least);
+ DCHECK_LE(preload_characters, 4);
+ if (compiler->macro_assembler()->CanReadUnaligned()) {
+ bool one_byte = compiler->one_byte();
+ if (one_byte) {
+ // We can't preload 3 characters because there is no machine instruction
+ // to do that. We can't just load 4 because we could be reading
+ // beyond the end of the string, which could cause a memory fault.
+ if (preload_characters == 3) preload_characters = 2;
+ } else {
+ if (preload_characters > 2) preload_characters = 2;
+ }
+ } else {
+ if (preload_characters > 1) preload_characters = 1;
+ }
+ return preload_characters;
+}
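+
+// Illustration: eats_at_least == 3 starts at Min(4, 3) == 3; on a one-byte
+// subject this is reduced to 2 (there is no 3-byte load), on a two-byte
+// subject anything above 2 is clamped to 2, and without unaligned reads the
+// preload is limited to a single character.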
+
+// This class is used when generating the alternatives in a choice node. It
+// records the way the alternative is being code generated.
+class AlternativeGeneration : public Malloced {
+ public:
+ AlternativeGeneration()
+ : possible_success(),
+ expects_preload(false),
+ after(),
+ quick_check_details() {}
+ Label possible_success;
+ bool expects_preload;
+ Label after;
+ QuickCheckDetails quick_check_details;
+};
+
+// Creates a list of AlternativeGenerations. If the list has a reasonable
+// size then it is on the stack, otherwise the excess is on the heap.
+class AlternativeGenerationList {
+ public:
+ AlternativeGenerationList(int count, Zone* zone) : alt_gens_(count, zone) {
+ for (int i = 0; i < count && i < kAFew; i++) {
+ alt_gens_.Add(a_few_alt_gens_ + i, zone);
+ }
+ for (int i = kAFew; i < count; i++) {
+ alt_gens_.Add(new AlternativeGeneration(), zone);
+ }
+ }
+ ~AlternativeGenerationList() {
+ for (int i = kAFew; i < alt_gens_.length(); i++) {
+ delete alt_gens_[i];
+ alt_gens_[i] = nullptr;
+ }
+ }
+
+ AlternativeGeneration* at(int i) { return alt_gens_[i]; }
+
+ private:
+ static const int kAFew = 10;
+ ZoneList<AlternativeGeneration*> alt_gens_;
+ AlternativeGeneration a_few_alt_gens_[kAFew];
+};
+
+void BoyerMoorePositionInfo::Set(int character) {
+ SetInterval(Interval(character, character));
+}
+
+namespace {
+
+ContainedInLattice AddRange(ContainedInLattice containment, const int* ranges,
+ int ranges_length, Interval new_range) {
+ DCHECK_EQ(1, ranges_length & 1);
+ DCHECK_EQ(String::kMaxCodePoint + 1, ranges[ranges_length - 1]);
+ if (containment == kLatticeUnknown) return containment;
+ bool inside = false;
+ int last = 0;
+ for (int i = 0; i < ranges_length; inside = !inside, last = ranges[i], i++) {
+ // Consider the range from last to ranges[i].
+ // We haven't got to the new range yet.
+ if (ranges[i] <= new_range.from()) continue;
+ // The new range is wholly inside the interval from last to ranges[i]. Note
+ // that new_range.to() is inclusive, but the values in ranges are not.
+ if (last <= new_range.from() && new_range.to() < ranges[i]) {
+ return Combine(containment, inside ? kLatticeIn : kLatticeOut);
+ }
+ return kLatticeUnknown;
+ }
+ return containment;
+}
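+
+// Illustration (using kWordRanges): the interval ['a', 'a'] falls wholly
+// inside a word range, so the result is combined with kLatticeIn; the
+// interval ['@', 'A'] straddles the boundary at 'A', so the loop bails out
+// with kLatticeUnknown.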
+
+int BitsetFirstSetBit(BoyerMoorePositionInfo::Bitset bitset) {
+ STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
+ 2 * kInt64Size * kBitsPerByte);
+
+ // Slight fiddling is needed here, since the bitset is of length 128 while
+ // CountTrailingZeros requires an integral type and std::bitset can only
+ // convert to unsigned long long. So we handle the most- and least-significant
+ // bits separately.
+
+ {
+ static constexpr BoyerMoorePositionInfo::Bitset mask(~uint64_t{0});
+ BoyerMoorePositionInfo::Bitset masked_bitset = bitset & mask;
+ STATIC_ASSERT(kInt64Size >= sizeof(decltype(masked_bitset.to_ullong())));
+ uint64_t lsb = masked_bitset.to_ullong();
+ if (lsb != 0) return base::bits::CountTrailingZeros(lsb);
+ }
+
+ {
+ BoyerMoorePositionInfo::Bitset masked_bitset = bitset >> 64;
+ uint64_t msb = masked_bitset.to_ullong();
+ if (msb != 0) return 64 + base::bits::CountTrailingZeros(msb);
+ }
+
+ return -1;
+}
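+
+// Illustration: for a bitset with only bit 70 set, the masked low 64 bits
+// yield 0, the shifted high half has bit 6 set, and the result is 64 + 6 = 70.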
+
+} // namespace
+
+void BoyerMoorePositionInfo::SetInterval(const Interval& interval) {
+ w_ = AddRange(w_, kWordRanges, kWordRangeCount, interval);
+
+ if (interval.size() >= kMapSize) {
+ map_count_ = kMapSize;
+ map_.set();
+ return;
+ }
+
+ for (int i = interval.from(); i <= interval.to(); i++) {
+ int mod_character = (i & kMask);
+ if (!map_[mod_character]) {
+ map_count_++;
+ map_.set(mod_character);
+ }
+ if (map_count_ == kMapSize) return;
+ }
+}
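+
+// Illustration: SetInterval(Interval('a', 'c')) sets bits 97..99 (raising
+// map_count_ by up to 3); characters >= kMapSize alias into the table modulo
+// 128 via kMask, and an interval spanning kMapSize or more characters simply
+// fills the whole map.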
+
+void BoyerMoorePositionInfo::SetAll() {
+ w_ = kLatticeUnknown;
+ if (map_count_ != kMapSize) {
+ map_count_ = kMapSize;
+ map_.set();
+ }
+}
+
+BoyerMooreLookahead::BoyerMooreLookahead(int length, RegExpCompiler* compiler,
+ Zone* zone)
+ : length_(length), compiler_(compiler) {
+ if (compiler->one_byte()) {
+ max_char_ = String::kMaxOneByteCharCode;
+ } else {
+ max_char_ = String::kMaxUtf16CodeUnit;
+ }
+ bitmaps_ = new (zone) ZoneList<BoyerMoorePositionInfo*>(length, zone);
+ for (int i = 0; i < length; i++) {
+ bitmaps_->Add(new (zone) BoyerMoorePositionInfo(), zone);
+ }
+}
+
+// Find the longest range of lookahead that has the fewest number of different
+// characters that can occur at a given position. Since we are optimizing two
+// different parameters at once this is a tradeoff.
+bool BoyerMooreLookahead::FindWorthwhileInterval(int* from, int* to) {
+ int biggest_points = 0;
+ // If more than 32 characters out of 128 can occur it is unlikely that we can
+ // be lucky enough to step forwards much of the time.
+ const int kMaxMax = 32;
+ for (int max_number_of_chars = 4; max_number_of_chars < kMaxMax;
+ max_number_of_chars *= 2) {
+ biggest_points =
+ FindBestInterval(max_number_of_chars, biggest_points, from, to);
+ }
+ if (biggest_points == 0) return false;
+ return true;
+}
+
+// Find the highest-points range between 0 and length_ where the character
+// information is not too vague. 'Too vague' means that there are more than
+// max_number_of_chars that can occur at this position. Calculates the number
+// of points as the product of width-of-the-range and
+// probability-of-finding-one-of-the-characters, where the probability is
+// calculated using the frequency distribution of the sample subject string.
+int BoyerMooreLookahead::FindBestInterval(int max_number_of_chars,
+ int old_biggest_points, int* from,
+ int* to) {
+ int biggest_points = old_biggest_points;
+ static const int kSize = RegExpMacroAssembler::kTableSize;
+ for (int i = 0; i < length_;) {
+ while (i < length_ && Count(i) > max_number_of_chars) i++;
+ if (i == length_) break;
+ int remembered_from = i;
+
+ BoyerMoorePositionInfo::Bitset union_bitset;
+ for (; i < length_ && Count(i) <= max_number_of_chars; i++) {
+ union_bitset |= bitmaps_->at(i)->raw_bitset();
+ }
+
+ int frequency = 0;
+
+ // Iterate only over set bits.
+ int j;
+ while ((j = BitsetFirstSetBit(union_bitset)) != -1) {
+ DCHECK(union_bitset[j]); // Sanity check.
+ // Add 1 to the frequency to give a small per-character boost for
+ // the cases where our sampling is not good enough and many
+ // characters have a frequency of zero. This means the frequency
+ // can theoretically be up to 2*kSize though we treat it mostly as
+ // a fraction of kSize.
+ frequency += compiler_->frequency_collator()->Frequency(j) + 1;
+ union_bitset.reset(j);
+ }
+
+ // We use the probability of skipping times the distance we are skipping to
+ // judge the effectiveness of this. Actually we have a cut-off: By
+ // dividing by 2 we switch off the skipping if the probability of skipping
+ // is less than 50%. This is because the multibyte mask-and-compare
+ // skipping in quickcheck is more likely to do well on this case.
+ bool in_quickcheck_range =
+ ((i - remembered_from < 4) ||
+ (compiler_->one_byte() ? remembered_from <= 4 : remembered_from <= 2));
+ // Called 'probability' but it is only a rough estimate and can actually
+ // be outside the 0-kSize range.
+ int probability = (in_quickcheck_range ? kSize / 2 : kSize) - frequency;
+ int points = (i - remembered_from) * probability;
+ if (points > biggest_points) {
+ *from = remembered_from;
+ *to = i - 1;
+ biggest_points = points;
+ }
+ }
+ return biggest_points;
+}
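+
+// Worked example of the scoring above (illustrative numbers, with kSize ==
+// 128): a width-4 range starting at position 5 in a one-byte subject is
+// outside the quick-check range, so a summed frequency of 32 gives probability
+// 128 - 32 = 96 and 4 * 96 = 384 points; a width-3 range with the same
+// frequency stays in quick-check range and only scores 3 * (64 - 32) = 96.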
+
+// Take all the characters that will not prevent a successful match if they
+// occur in the subject string in the range between min_lookahead and
+// max_lookahead (inclusive) measured from the current position. If the
+// character at max_lookahead offset is not one of these characters, then we
+// can safely skip forwards by the number of characters in the range.
+int BoyerMooreLookahead::GetSkipTable(int min_lookahead, int max_lookahead,
+ Handle<ByteArray> boolean_skip_table) {
+ const int kSkipArrayEntry = 0;
+ const int kDontSkipArrayEntry = 1;
+
+ std::memset(boolean_skip_table->GetDataStartAddress(), kSkipArrayEntry,
+ boolean_skip_table->length());
+
+ for (int i = max_lookahead; i >= min_lookahead; i--) {
+ BoyerMoorePositionInfo::Bitset bitset = bitmaps_->at(i)->raw_bitset();
+
+ // Iterate only over set bits.
+ int j;
+ while ((j = BitsetFirstSetBit(bitset)) != -1) {
+ DCHECK(bitset[j]); // Sanity check.
+ boolean_skip_table->set(j, kDontSkipArrayEntry);
+ bitset.reset(j);
+ }
+ }
+
+ const int skip = max_lookahead + 1 - min_lookahead;
+ return skip;
+}
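+
+// Illustration: with min_lookahead == 0 and max_lookahead == 2, if only 'f'
+// and 'o' can occur at those offsets then just the table entries for 'f' and
+// 'o' are marked kDontSkipArrayEntry, and the returned skip distance is
+// 2 + 1 - 0 = 3.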
+
+// See comment above on the implementation of GetSkipTable.
+void BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
+ const int kSize = RegExpMacroAssembler::kTableSize;
+
+ int min_lookahead = 0;
+ int max_lookahead = 0;
+
+ if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return;
+
+ // Check if we only have a single non-empty position info, and that info
+ // contains precisely one character.
+ bool found_single_character = false;
+ int single_character = 0;
+ for (int i = max_lookahead; i >= min_lookahead; i--) {
+ BoyerMoorePositionInfo* map = bitmaps_->at(i);
+ if (map->map_count() == 0) continue;
+
+ if (found_single_character || map->map_count() > 1) {
+ found_single_character = false;
+ break;
+ }
+
+ DCHECK(!found_single_character);
+ DCHECK_EQ(map->map_count(), 1);
+
+ found_single_character = true;
+ single_character = BitsetFirstSetBit(map->raw_bitset());
+
+ DCHECK_NE(single_character, -1);
+ }
+
+ int lookahead_width = max_lookahead + 1 - min_lookahead;
+
+ if (found_single_character && lookahead_width == 1 && max_lookahead < 3) {
+ // The mask-compare can probably handle this better.
+ return;
+ }
+
+ if (found_single_character) {
+ Label cont, again;
+ masm->Bind(&again);
+ masm->LoadCurrentCharacter(max_lookahead, &cont, true);
+ if (max_char_ > kSize) {
+ masm->CheckCharacterAfterAnd(single_character,
+ RegExpMacroAssembler::kTableMask, &cont);
+ } else {
+ masm->CheckCharacter(single_character, &cont);
+ }
+ masm->AdvanceCurrentPosition(lookahead_width);
+ masm->GoTo(&again);
+ masm->Bind(&cont);
+ return;
+ }
+
+ Factory* factory = masm->isolate()->factory();
+ Handle<ByteArray> boolean_skip_table =
+ factory->NewByteArray(kSize, AllocationType::kOld);
+ int skip_distance =
+ GetSkipTable(min_lookahead, max_lookahead, boolean_skip_table);
+ DCHECK_NE(0, skip_distance);
+
+ Label cont, again;
+ masm->Bind(&again);
+ masm->LoadCurrentCharacter(max_lookahead, &cont, true);
+ masm->CheckBitInTable(boolean_skip_table, &cont);
+ masm->AdvanceCurrentPosition(skip_distance);
+ masm->GoTo(&again);
+ masm->Bind(&cont);
+}
+
+/* Code generation for choice nodes.
+ *
+ * We generate quick checks that do a mask and compare to eliminate a
+ * choice. If the quick check succeeds then it jumps to the continuation to
+ * do slow checks and check subsequent nodes. If it fails (the common case)
+ * it falls through to the next choice.
+ *
+ * Here is the desired flow graph. Nodes directly below each other imply
+ * fallthrough. Alternatives 1 and 2 have quick checks. Alternative
+ * 3 doesn't have a quick check so we have to call the slow check.
+ * Nodes are marked Qn for quick checks and Sn for slow checks. The entire
+ * regexp continuation is generated directly after the Sn node, up to the
+ * next GoTo if we decide to reuse some already generated code. Some
+ * nodes expect preload_characters to be preloaded into the current
+ * character register. R nodes do this preloading. Vertices are marked
+ * F for failures and S for success (possible success in the case of quick
+ * nodes). L, V, < and > are used as arrow heads.
+ *
+ * ----------> R
+ * |
+ * V
+ * Q1 -----> S1
+ * | S /
+ * F| /
+ * | F/
+ * | /
+ * | R
+ * | /
+ * V L
+ * Q2 -----> S2
+ * | S /
+ * F| /
+ * | F/
+ * | /
+ * | R
+ * | /
+ * V L
+ * S3
+ * |
+ * F|
+ * |
+ * R
+ * |
+ * backtrack V
+ * <----------Q4
+ * \ F |
+ * \ |S
+ * \ F V
+ * \-----S4
+ *
+ * For greedy loops we push the current position, then generate the code that
+ * eats the input specially in EmitGreedyLoop. The other choice (the
+ * continuation) is generated by the normal code in EmitChoices, and steps back
+ * in the input to the starting position when it fails to match. The loop code
+ * looks like this (U is the unwind code that steps back in the greedy loop).
+ *
+ * _____
+ * / \
+ * V |
+ * ----------> S1 |
+ * /| |
+ * / |S |
+ * F/ \_____/
+ * /
+ * |<-----
+ * | \
+ * V |S
+ * Q2 ---> U----->backtrack
+ * | F /
+ * S| /
+ * V F /
+ * S2--/
+ */
+
+GreedyLoopState::GreedyLoopState(bool not_at_start) {
+ counter_backtrack_trace_.set_backtrack(&label_);
+ if (not_at_start) counter_backtrack_trace_.set_at_start(Trace::FALSE_VALUE);
+}
+
+void ChoiceNode::AssertGuardsMentionRegisters(Trace* trace) {
+#ifdef DEBUG
+ int choice_count = alternatives_->length();
+ for (int i = 0; i < choice_count - 1; i++) {
+ GuardedAlternative alternative = alternatives_->at(i);
+ ZoneList<Guard*>* guards = alternative.guards();
+ int guard_count = (guards == nullptr) ? 0 : guards->length();
+ for (int j = 0; j < guard_count; j++) {
+ DCHECK(!trace->mentions_reg(guards->at(j)->reg()));
+ }
+ }
+#endif
+}
+
+void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler, Trace* current_trace,
+ PreloadState* state) {
+ if (state->eats_at_least_ == PreloadState::kEatsAtLeastNotYetInitialized) {
+ // Save some time by looking at most one machine word ahead.
+ state->eats_at_least_ =
+ EatsAtLeast(compiler->one_byte() ? 4 : 2, kRecursionBudget,
+ current_trace->at_start() == Trace::FALSE_VALUE);
+ }
+ state->preload_characters_ =
+ CalculatePreloadCharacters(compiler, state->eats_at_least_);
+
+ state->preload_is_current_ =
+ (current_trace->characters_preloaded() == state->preload_characters_);
+ state->preload_has_checked_bounds_ = state->preload_is_current_;
+}
+
+void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ int choice_count = alternatives_->length();
+
+ if (choice_count == 1 && alternatives_->at(0).guards() == nullptr) {
+ alternatives_->at(0).node()->Emit(compiler, trace);
+ return;
+ }
+
+ AssertGuardsMentionRegisters(trace);
+
+ LimitResult limit_result = LimitVersions(compiler, trace);
+ if (limit_result == DONE) return;
+ DCHECK(limit_result == CONTINUE);
+
+ // For loop nodes we already flushed (see LoopChoiceNode::Emit), but for
+ // other choice nodes we only flush if we are out of code size budget.
+ if (trace->flush_budget() == 0 && trace->actions() != nullptr) {
+ trace->Flush(compiler, this);
+ return;
+ }
+
+ RecursionCheck rc(compiler);
+
+ PreloadState preload;
+ preload.init();
+ GreedyLoopState greedy_loop_state(not_at_start());
+
+ int text_length = GreedyLoopTextLengthForAlternative(&alternatives_->at(0));
+ AlternativeGenerationList alt_gens(choice_count, zone());
+
+ if (choice_count > 1 && text_length != kNodeIsTooComplexForGreedyLoops) {
+ trace = EmitGreedyLoop(compiler, trace, &alt_gens, &preload,
+ &greedy_loop_state, text_length);
+ } else {
+ // TODO(erikcorry): Delete this. We don't need this label, but it makes us
+ // match the traces produced pre-cleanup.
+ Label second_choice;
+ compiler->macro_assembler()->Bind(&second_choice);
+
+ preload.eats_at_least_ = EmitOptimizedUnanchoredSearch(compiler, trace);
+
+ EmitChoices(compiler, &alt_gens, 0, trace, &preload);
+ }
+
+ // At this point we need to generate slow checks for the alternatives where
+ // the quick check was inlined. We can recognize these because the associated
+ // label was bound.
+ int new_flush_budget = trace->flush_budget() / choice_count;
+ for (int i = 0; i < choice_count; i++) {
+ AlternativeGeneration* alt_gen = alt_gens.at(i);
+ Trace new_trace(*trace);
+ // If there are actions to be flushed we have to limit how many times
+ // they are flushed. Take the budget of the parent trace and distribute
+ // it fairly amongst the children.
+ if (new_trace.actions() != nullptr) {
+ new_trace.set_flush_budget(new_flush_budget);
+ }
+ bool next_expects_preload =
+ i == choice_count - 1 ? false : alt_gens.at(i + 1)->expects_preload;
+ EmitOutOfLineContinuation(compiler, &new_trace, alternatives_->at(i),
+ alt_gen, preload.preload_characters_,
+ next_expects_preload);
+ }
+}
+
+Trace* ChoiceNode::EmitGreedyLoop(RegExpCompiler* compiler, Trace* trace,
+ AlternativeGenerationList* alt_gens,
+ PreloadState* preload,
+ GreedyLoopState* greedy_loop_state,
+ int text_length) {
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ // Here we have special handling for greedy loops containing only text nodes
+ // and other simple nodes. These are handled by pushing the current
+ // position on the stack and then incrementing the current position each
+ // time around the loop. On backtrack we decrement the current position
+ // and check it against the pushed value. This avoids pushing backtrack
+ // information for each iteration of the loop, which could take up a lot of
+ // space.
+ DCHECK(trace->stop_node() == nullptr);
+ macro_assembler->PushCurrentPosition();
+ Label greedy_match_failed;
+ Trace greedy_match_trace;
+ if (not_at_start()) greedy_match_trace.set_at_start(Trace::FALSE_VALUE);
+ greedy_match_trace.set_backtrack(&greedy_match_failed);
+ Label loop_label;
+ macro_assembler->Bind(&loop_label);
+ greedy_match_trace.set_stop_node(this);
+ greedy_match_trace.set_loop_label(&loop_label);
+ alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
+ macro_assembler->Bind(&greedy_match_failed);
+
+ Label second_choice; // For use in greedy matches.
+ macro_assembler->Bind(&second_choice);
+
+ Trace* new_trace = greedy_loop_state->counter_backtrack_trace();
+
+ EmitChoices(compiler, alt_gens, 1, new_trace, preload);
+
+ macro_assembler->Bind(greedy_loop_state->label());
+ // If we have unwound to the bottom then backtrack.
+ macro_assembler->CheckGreedyLoop(trace->backtrack());
+ // Otherwise try the second priority at an earlier position.
+ macro_assembler->AdvanceCurrentPosition(-text_length);
+ macro_assembler->GoTo(&second_choice);
+ return new_trace;
+}
+
+int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
+ Trace* trace) {
+ int eats_at_least = PreloadState::kEatsAtLeastNotYetInitialized;
+ if (alternatives_->length() != 2) return eats_at_least;
+
+ GuardedAlternative alt1 = alternatives_->at(1);
+ if (alt1.guards() != nullptr && alt1.guards()->length() != 0) {
+ return eats_at_least;
+ }
+ RegExpNode* eats_anything_node = alt1.node();
+ if (eats_anything_node->GetSuccessorOfOmnivorousTextNode(compiler) != this) {
+ return eats_at_least;
+ }
+
+ // Really we should be creating a new trace when we execute this function,
+ // but there is no need, because the code it generates cannot backtrack, and
+ // we always arrive here with a trivial trace (since it's the entry to a
+ // loop). That also implies that there are no preloaded characters, which is
+ // good, because it means we won't be violating any assumptions by
+ // overwriting those characters with new load instructions.
+ DCHECK(trace->is_trivial());
+
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ Isolate* isolate = macro_assembler->isolate();
+ // At this point we know that we are at a non-greedy loop that will eat
+ // any character one at a time. Any non-anchored regexp has such a
+ // loop prepended to it in order to find where it starts. We look for
+ // a pattern of the form ...abc... where we can look 6 characters ahead
+ // and step forwards 3 if the character is not one of abc. Abc need
+ // not be atoms; they can be any reasonably limited character class or
+ // small alternation.
+ BoyerMooreLookahead* bm = bm_info(false);
+ if (bm == nullptr) {
+ eats_at_least =
+ Min(kMaxLookaheadForBoyerMoore,
+ EatsAtLeast(kMaxLookaheadForBoyerMoore, kRecursionBudget, false));
+ if (eats_at_least >= 1) {
+ bm = new (zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
+ GuardedAlternative alt0 = alternatives_->at(0);
+ alt0.node()->FillInBMInfo(isolate, 0, kRecursionBudget, bm, false);
+ }
+ }
+ if (bm != nullptr) {
+ bm->EmitSkipInstructions(macro_assembler);
+ }
+ return eats_at_least;
+}
+
+void ChoiceNode::EmitChoices(RegExpCompiler* compiler,
+ AlternativeGenerationList* alt_gens,
+ int first_choice, Trace* trace,
+ PreloadState* preload) {
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ SetUpPreLoad(compiler, trace, preload);
+
+ // For now we just call all choices one after the other. The idea ultimately
+ // is to use the Dispatch table to try only the relevant ones.
+ int choice_count = alternatives_->length();
+
+ int new_flush_budget = trace->flush_budget() / choice_count;
+
+ for (int i = first_choice; i < choice_count; i++) {
+ bool is_last = i == choice_count - 1;
+ bool fall_through_on_failure = !is_last;
+ GuardedAlternative alternative = alternatives_->at(i);
+ AlternativeGeneration* alt_gen = alt_gens->at(i);
+ alt_gen->quick_check_details.set_characters(preload->preload_characters_);
+ ZoneList<Guard*>* guards = alternative.guards();
+ int guard_count = (guards == nullptr) ? 0 : guards->length();
+ Trace new_trace(*trace);
+ new_trace.set_characters_preloaded(
+ preload->preload_is_current_ ? preload->preload_characters_ : 0);
+ if (preload->preload_has_checked_bounds_) {
+ new_trace.set_bound_checked_up_to(preload->preload_characters_);
+ }
+ new_trace.quick_check_performed()->Clear();
+ if (not_at_start_) new_trace.set_at_start(Trace::FALSE_VALUE);
+ if (!is_last) {
+ new_trace.set_backtrack(&alt_gen->after);
+ }
+ alt_gen->expects_preload = preload->preload_is_current_;
+ bool generate_full_check_inline = false;
+ if (compiler->optimize() &&
+ try_to_emit_quick_check_for_alternative(i == 0) &&
+ alternative.node()->EmitQuickCheck(
+ compiler, trace, &new_trace, preload->preload_has_checked_bounds_,
+ &alt_gen->possible_success, &alt_gen->quick_check_details,
+ fall_through_on_failure)) {
+ // Quick check was generated for this choice.
+ preload->preload_is_current_ = true;
+ preload->preload_has_checked_bounds_ = true;
+ // If we generated the quick check to fall through on possible success,
+ // we now need to generate the full check inline.
+ if (!fall_through_on_failure) {
+ macro_assembler->Bind(&alt_gen->possible_success);
+ new_trace.set_quick_check_performed(&alt_gen->quick_check_details);
+ new_trace.set_characters_preloaded(preload->preload_characters_);
+ new_trace.set_bound_checked_up_to(preload->preload_characters_);
+ generate_full_check_inline = true;
+ }
+ } else if (alt_gen->quick_check_details.cannot_match()) {
+ if (!fall_through_on_failure) {
+ macro_assembler->GoTo(trace->backtrack());
+ }
+ continue;
+ } else {
+ // No quick check was generated. Put the full code here.
+ // If this is not the first choice then there could be slow checks from
+ // previous cases that go here when they fail. There's no reason to
+ // insist that they preload characters since the slow check we are about
+ // to generate probably can't use it.
+ if (i != first_choice) {
+ alt_gen->expects_preload = false;
+ new_trace.InvalidateCurrentCharacter();
+ }
+ generate_full_check_inline = true;
+ }
+ if (generate_full_check_inline) {
+ if (new_trace.actions() != nullptr) {
+ new_trace.set_flush_budget(new_flush_budget);
+ }
+ for (int j = 0; j < guard_count; j++) {
+ GenerateGuard(macro_assembler, guards->at(j), &new_trace);
+ }
+ alternative.node()->Emit(compiler, &new_trace);
+ preload->preload_is_current_ = false;
+ }
+ macro_assembler->Bind(&alt_gen->after);
+ }
+}
+
+void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
+ Trace* trace,
+ GuardedAlternative alternative,
+ AlternativeGeneration* alt_gen,
+ int preload_characters,
+ bool next_expects_preload) {
+ if (!alt_gen->possible_success.is_linked()) return;
+
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ macro_assembler->Bind(&alt_gen->possible_success);
+ Trace out_of_line_trace(*trace);
+ out_of_line_trace.set_characters_preloaded(preload_characters);
+ out_of_line_trace.set_quick_check_performed(&alt_gen->quick_check_details);
+ if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE_VALUE);
+ ZoneList<Guard*>* guards = alternative.guards();
+ int guard_count = (guards == nullptr) ? 0 : guards->length();
+ if (next_expects_preload) {
+ Label reload_current_char;
+ out_of_line_trace.set_backtrack(&reload_current_char);
+ for (int j = 0; j < guard_count; j++) {
+ GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
+ }
+ alternative.node()->Emit(compiler, &out_of_line_trace);
+ macro_assembler->Bind(&reload_current_char);
+ // Reload the current character, since the next quick check expects that.
+ // We don't need to check bounds here because we only get into this
+ // code through a quick check which already did the checked load.
+ macro_assembler->LoadCurrentCharacter(trace->cp_offset(), nullptr, false,
+ preload_characters);
+ macro_assembler->GoTo(&(alt_gen->after));
+ } else {
+ out_of_line_trace.set_backtrack(&(alt_gen->after));
+ for (int j = 0; j < guard_count; j++) {
+ GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
+ }
+ alternative.node()->Emit(compiler, &out_of_line_trace);
+ }
+}
+
+void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ LimitResult limit_result = LimitVersions(compiler, trace);
+ if (limit_result == DONE) return;
+ DCHECK(limit_result == CONTINUE);
+
+ RecursionCheck rc(compiler);
+
+ switch (action_type_) {
+ case STORE_POSITION: {
+ Trace::DeferredCapture new_capture(data_.u_position_register.reg,
+ data_.u_position_register.is_capture,
+ trace);
+ Trace new_trace = *trace;
+ new_trace.add_action(&new_capture);
+ on_success()->Emit(compiler, &new_trace);
+ break;
+ }
+ case INCREMENT_REGISTER: {
+ Trace::DeferredIncrementRegister new_increment(
+ data_.u_increment_register.reg);
+ Trace new_trace = *trace;
+ new_trace.add_action(&new_increment);
+ on_success()->Emit(compiler, &new_trace);
+ break;
+ }
+ case SET_REGISTER: {
+ Trace::DeferredSetRegister new_set(data_.u_store_register.reg,
+ data_.u_store_register.value);
+ Trace new_trace = *trace;
+ new_trace.add_action(&new_set);
+ on_success()->Emit(compiler, &new_trace);
+ break;
+ }
+ case CLEAR_CAPTURES: {
+ Trace::DeferredClearCaptures new_capture(Interval(
+ data_.u_clear_captures.range_from, data_.u_clear_captures.range_to));
+ Trace new_trace = *trace;
+ new_trace.add_action(&new_capture);
+ on_success()->Emit(compiler, &new_trace);
+ break;
+ }
+ case BEGIN_SUBMATCH:
+ if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ } else {
+ assembler->WriteCurrentPositionToRegister(
+ data_.u_submatch.current_position_register, 0);
+ assembler->WriteStackPointerToRegister(
+ data_.u_submatch.stack_pointer_register);
+ on_success()->Emit(compiler, trace);
+ }
+ break;
+ case EMPTY_MATCH_CHECK: {
+ int start_pos_reg = data_.u_empty_match_check.start_register;
+ int stored_pos = 0;
+ int rep_reg = data_.u_empty_match_check.repetition_register;
+ bool has_minimum = (rep_reg != RegExpCompiler::kNoRegister);
+ bool know_dist = trace->GetStoredPosition(start_pos_reg, &stored_pos);
+ if (know_dist && !has_minimum && stored_pos == trace->cp_offset()) {
+ // If we know we haven't advanced and there is no minimum we
+ // can just backtrack immediately.
+ assembler->GoTo(trace->backtrack());
+ } else if (know_dist && stored_pos < trace->cp_offset()) {
+ // If we know we've advanced we can generate the continuation
+ // immediately.
+ on_success()->Emit(compiler, trace);
+ } else if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ } else {
+ Label skip_empty_check;
+ // If we have a minimum number of repetitions we check the current
+ // number first and skip the empty check if it's not enough.
+ if (has_minimum) {
+ int limit = data_.u_empty_match_check.repetition_limit;
+ assembler->IfRegisterLT(rep_reg, limit, &skip_empty_check);
+ }
+ // If the match is empty we bail out, otherwise we fall through
+ // to the on-success continuation.
+ assembler->IfRegisterEqPos(data_.u_empty_match_check.start_register,
+ trace->backtrack());
+ assembler->Bind(&skip_empty_check);
+ on_success()->Emit(compiler, trace);
+ }
+ break;
+ }
+ case POSITIVE_SUBMATCH_SUCCESS: {
+ if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ return;
+ }
+ assembler->ReadCurrentPositionFromRegister(
+ data_.u_submatch.current_position_register);
+ assembler->ReadStackPointerFromRegister(
+ data_.u_submatch.stack_pointer_register);
+ int clear_register_count = data_.u_submatch.clear_register_count;
+ if (clear_register_count == 0) {
+ on_success()->Emit(compiler, trace);
+ return;
+ }
+ int clear_registers_from = data_.u_submatch.clear_register_from;
+ Label clear_registers_backtrack;
+ Trace new_trace = *trace;
+ new_trace.set_backtrack(&clear_registers_backtrack);
+ on_success()->Emit(compiler, &new_trace);
+
+ assembler->Bind(&clear_registers_backtrack);
+ int clear_registers_to = clear_registers_from + clear_register_count - 1;
+ assembler->ClearRegisters(clear_registers_from, clear_registers_to);
+
+ DCHECK(trace->backtrack() == nullptr);
+ assembler->Backtrack();
+ return;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ return;
+ }
+
+ LimitResult limit_result = LimitVersions(compiler, trace);
+ if (limit_result == DONE) return;
+ DCHECK(limit_result == CONTINUE);
+
+ RecursionCheck rc(compiler);
+
+ DCHECK_EQ(start_reg_ + 1, end_reg_);
+ if (IgnoreCase(flags_)) {
+ assembler->CheckNotBackReferenceIgnoreCase(
+ start_reg_, read_backward(), IsUnicode(flags_), trace->backtrack());
+ } else {
+ assembler->CheckNotBackReference(start_reg_, read_backward(),
+ trace->backtrack());
+ }
+ // If we read backward, we may end up at the start.
+ if (read_backward()) trace->set_at_start(Trace::UNKNOWN);
+
+ // Check that the back reference does not end inside a surrogate pair.
+ if (IsUnicode(flags_) && !compiler->one_byte()) {
+ assembler->CheckNotInSurrogatePair(trace->cp_offset(), trace->backtrack());
+ }
+ on_success()->Emit(compiler, trace);
+}
+
+// -------------------------------------------------------------------
+// Analysis
+
+void Analysis::EnsureAnalyzed(RegExpNode* that) {
+ StackLimitCheck check(isolate());
+ if (check.HasOverflowed()) {
+ fail("Stack overflow");
+ return;
+ }
+ if (that->info()->been_analyzed || that->info()->being_analyzed) return;
+ that->info()->being_analyzed = true;
+ that->Accept(this);
+ that->info()->being_analyzed = false;
+ that->info()->been_analyzed = true;
+}
+
+void Analysis::VisitEnd(EndNode* that) {
+ // nothing to do
+}
+
+void TextNode::CalculateOffsets() {
+ int element_count = elements()->length();
+ // Set up the offsets of the elements relative to the start. This is a fixed
+ // quantity since a TextNode can only contain fixed-width things.
+ int cp_offset = 0;
+ for (int i = 0; i < element_count; i++) {
+ TextElement& elm = elements()->at(i);
+ elm.set_cp_offset(cp_offset);
+ cp_offset += elm.length();
+ }
+}
+
+void Analysis::VisitText(TextNode* that) {
+ that->MakeCaseIndependent(isolate(), is_one_byte_);
+ EnsureAnalyzed(that->on_success());
+ if (!has_failed()) {
+ that->CalculateOffsets();
+ }
+}
+
+void Analysis::VisitAction(ActionNode* that) {
+ RegExpNode* target = that->on_success();
+ EnsureAnalyzed(target);
+ if (!has_failed()) {
+ // If the next node is interested in what it follows then this node
+ // has to be interested too so it can pass the information on.
+ that->info()->AddFromFollowing(target->info());
+ }
+}
+
+void Analysis::VisitChoice(ChoiceNode* that) {
+ NodeInfo* info = that->info();
+ for (int i = 0; i < that->alternatives()->length(); i++) {
+ RegExpNode* node = that->alternatives()->at(i).node();
+ EnsureAnalyzed(node);
+ if (has_failed()) return;
+ // Anything the following nodes need to know has to be known by
+ // this node also, so it can pass it on.
+ info->AddFromFollowing(node->info());
+ }
+}
+
+void Analysis::VisitLoopChoice(LoopChoiceNode* that) {
+ NodeInfo* info = that->info();
+ for (int i = 0; i < that->alternatives()->length(); i++) {
+ RegExpNode* node = that->alternatives()->at(i).node();
+ if (node != that->loop_node()) {
+ EnsureAnalyzed(node);
+ if (has_failed()) return;
+ info->AddFromFollowing(node->info());
+ }
+ }
+ // Check the loop last since it may need the value of this node
+ // to get a correct result.
+ EnsureAnalyzed(that->loop_node());
+ if (!has_failed()) {
+ info->AddFromFollowing(that->loop_node()->info());
+ }
+}
+
+void Analysis::VisitBackReference(BackReferenceNode* that) {
+ EnsureAnalyzed(that->on_success());
+}
+
+void Analysis::VisitAssertion(AssertionNode* that) {
+ EnsureAnalyzed(that->on_success());
+}
+
+void BackReferenceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
+ // Working out the set of characters that a backreference can match is too
+ // hard, so we just say that any character can match.
+ bm->SetRest(offset);
+ SaveBMInfo(bm, not_at_start, offset);
+}
+
+STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
+ RegExpMacroAssembler::kTableSize);
+
+void ChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ ZoneList<GuardedAlternative>* alts = alternatives();
+ budget = (budget - 1) / alts->length();
+ for (int i = 0; i < alts->length(); i++) {
+ GuardedAlternative& alt = alts->at(i);
+ if (alt.guards() != nullptr && alt.guards()->length() != 0) {
+ bm->SetRest(offset); // Give up trying to fill in info.
+ SaveBMInfo(bm, not_at_start, offset);
+ return;
+ }
+ alt.node()->FillInBMInfo(isolate, offset, budget, bm, not_at_start);
+ }
+ SaveBMInfo(bm, not_at_start, offset);
+}
+
+void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ if (initial_offset >= bm->length()) return;
+ int offset = initial_offset;
+ int max_char = bm->max_char();
+ for (int i = 0; i < elements()->length(); i++) {
+ if (offset >= bm->length()) {
+ if (initial_offset == 0) set_bm_info(not_at_start, bm);
+ return;
+ }
+ TextElement text = elements()->at(i);
+ if (text.text_type() == TextElement::ATOM) {
+ RegExpAtom* atom = text.atom();
+ for (int j = 0; j < atom->length(); j++, offset++) {
+ if (offset >= bm->length()) {
+ if (initial_offset == 0) set_bm_info(not_at_start, bm);
+ return;
+ }
+ uc16 character = atom->data()[j];
+ if (IgnoreCase(atom->flags())) {
+ unibrow::uchar chars[4];
+ int length = GetCaseIndependentLetters(
+ isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
+ chars, 4);
+ for (int j = 0; j < length; j++) {
+ bm->Set(offset, chars[j]);
+ }
+ } else {
+ if (character <= max_char) bm->Set(offset, character);
+ }
+ }
+ } else {
+ DCHECK_EQ(TextElement::CHAR_CLASS, text.text_type());
+ RegExpCharacterClass* char_class = text.char_class();
+ ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
+ if (char_class->is_negated()) {
+ bm->SetAll(offset);
+ } else {
+ for (int k = 0; k < ranges->length(); k++) {
+ CharacterRange& range = ranges->at(k);
+ if (range.from() > max_char) continue;
+ int to = Min(max_char, static_cast<int>(range.to()));
+ bm->SetInterval(offset, Interval(range.from(), to));
+ }
+ }
+ offset++;
+ }
+ }
+ if (offset >= bm->length()) {
+ if (initial_offset == 0) set_bm_info(not_at_start, bm);
+ return;
+ }
+ on_success()->FillInBMInfo(isolate, offset, budget - 1, bm,
+ true); // Not at start after a text node.
+ if (initial_offset == 0) set_bm_info(not_at_start, bm);
+}
+
+// static
+RegExpNode* RegExpCompiler::OptionallyStepBackToLeadSurrogate(
+ RegExpCompiler* compiler, RegExpNode* on_success, JSRegExp::Flags flags) {
+ DCHECK(!compiler->read_backward());
+ Zone* zone = compiler->zone();
+ ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
+ zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
+ ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
+ zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
+
+ ChoiceNode* optional_step_back = new (zone) ChoiceNode(2, zone);
+
+ int stack_register = compiler->UnicodeLookaroundStackRegister();
+ int position_register = compiler->UnicodeLookaroundPositionRegister();
+ RegExpNode* step_back = TextNode::CreateForCharacterRanges(
+ zone, lead_surrogates, true, on_success, flags);
+ RegExpLookaround::Builder builder(true, step_back, stack_register,
+ position_register);
+ RegExpNode* match_trail = TextNode::CreateForCharacterRanges(
+ zone, trail_surrogates, false, builder.on_match_success(), flags);
+
+ optional_step_back->AddAlternative(
+ GuardedAlternative(builder.ForMatch(match_trail)));
+ optional_step_back->AddAlternative(GuardedAlternative(on_success));
+
+ return optional_step_back;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-compiler.h b/deps/v8/src/regexp/regexp-compiler.h
new file mode 100644
index 0000000000..1b70abfd98
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-compiler.h
@@ -0,0 +1,657 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_COMPILER_H_
+#define V8_REGEXP_REGEXP_COMPILER_H_
+
+#include <bitset>
+
+#include "src/base/small-vector.h"
+#include "src/regexp/regexp-nodes.h"
+
+namespace v8 {
+namespace internal {
+
+class DynamicBitSet;
+class Isolate;
+
+namespace regexp_compiler_constants {
+
+// The '2' variant has inclusive from and exclusive to.
+// This covers \s as defined in ECMA-262 5.1, 15.10.2.12,
+// which includes WhiteSpace (7.2) and LineTerminator (7.3) values.
+constexpr uc32 kRangeEndMarker = 0x110000;
+constexpr int kSpaceRanges[] = {
+ '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0, 0x00A1, 0x1680,
+ 0x1681, 0x2000, 0x200B, 0x2028, 0x202A, 0x202F, 0x2030,
+ 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, kRangeEndMarker};
+constexpr int kSpaceRangeCount = arraysize(kSpaceRanges);
+
+constexpr int kWordRanges[] = {'0', '9' + 1, 'A', 'Z' + 1, '_',
+ '_' + 1, 'a', 'z' + 1, kRangeEndMarker};
+constexpr int kWordRangeCount = arraysize(kWordRanges);
+constexpr int kDigitRanges[] = {'0', '9' + 1, kRangeEndMarker};
+constexpr int kDigitRangeCount = arraysize(kDigitRanges);
+constexpr int kSurrogateRanges[] = {kLeadSurrogateStart,
+ kLeadSurrogateStart + 1, kRangeEndMarker};
+constexpr int kSurrogateRangeCount = arraysize(kSurrogateRanges);
+constexpr int kLineTerminatorRanges[] = {0x000A, 0x000B, 0x000D, 0x000E,
+ 0x2028, 0x202A, kRangeEndMarker};
+constexpr int kLineTerminatorRangeCount = arraysize(kLineTerminatorRanges);
+
+// More makes code generation slower, less makes V8 benchmark score lower.
+constexpr int kMaxLookaheadForBoyerMoore = 8;
+// In a 3-character pattern you can maximally step forwards 3 characters
+// at a time, which is not always enough to pay for the extra logic.
+constexpr int kPatternTooShortForBoyerMoore = 2;
+
+} // namespace regexp_compiler_constants
+
+inline bool IgnoreCase(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kIgnoreCase) != 0;
+}
+
+inline bool IsUnicode(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kUnicode) != 0;
+}
+
+inline bool IsSticky(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kSticky) != 0;
+}
+
+inline bool IsGlobal(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kGlobal) != 0;
+}
+
+inline bool DotAll(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kDotAll) != 0;
+}
+
+inline bool Multiline(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kMultiline) != 0;
+}
+
+inline bool NeedsUnicodeCaseEquivalents(JSRegExp::Flags flags) {
+ // Both unicode and ignore_case flags are set. We need to use ICU to find
+ // the closure over case equivalents.
+ return IsUnicode(flags) && IgnoreCase(flags);
+}
+
+// Details of a quick mask-compare check that can look ahead in the
+// input stream.
+class QuickCheckDetails {
+ public:
+ QuickCheckDetails()
+ : characters_(0), mask_(0), value_(0), cannot_match_(false) {}
+ explicit QuickCheckDetails(int characters)
+ : characters_(characters), mask_(0), value_(0), cannot_match_(false) {}
+ bool Rationalize(bool one_byte);
+ // Merge in the information from another branch of an alternation.
+ void Merge(QuickCheckDetails* other, int from_index);
+ // Advance the current position by some amount.
+ void Advance(int by, bool one_byte);
+ void Clear();
+ bool cannot_match() { return cannot_match_; }
+ void set_cannot_match() { cannot_match_ = true; }
+ struct Position {
+ Position() : mask(0), value(0), determines_perfectly(false) {}
+ uc16 mask;
+ uc16 value;
+ bool determines_perfectly;
+ };
+ int characters() { return characters_; }
+ void set_characters(int characters) { characters_ = characters; }
+ Position* positions(int index) {
+ DCHECK_LE(0, index);
+ DCHECK_GT(characters_, index);
+ return positions_ + index;
+ }
+ uint32_t mask() { return mask_; }
+ uint32_t value() { return value_; }
+
+ private:
+ // How many characters we have quick check information for. This is
+ // the same for all branches of a choice node.
+ int characters_;
+ Position positions_[4];
+ // These values are the condensate of the above array after Rationalize().
+ uint32_t mask_;
+ uint32_t value_;
+ // If set to true, there is no way this quick check can match at all.
+ // E.g., if it requires us to be at the start of the input, and we aren't.
+ bool cannot_match_;
+};
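+
+// Rough sketch of how these details are used (simplified, assumed flow): each
+// position records a per-character mask/value pair, Rationalize() condenses
+// them into the single wide mask_/value_, and the generated quick check ANDs
+// a multi-character load with mask_ and compares it against value_ to cheaply
+// reject positions that cannot match before the full per-node checks run.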
+
+// Improve the speed that we scan for an initial point where a non-anchored
+// regexp can match by using a Boyer-Moore-like table. This is done by
+// identifying non-greedy non-capturing loops in the nodes that eat any
+// character one at a time. For example in the middle of the regexp
+// /foo[\s\S]*?bar/ we find such a loop. There is also such a loop implicitly
+// inserted at the start of any non-anchored regexp.
+//
+// When we have found such a loop we look ahead in the nodes to find the set of
+// characters that can come at given distances. For example for the regexp
+// /.?foo/ we know that there are at least 3 characters ahead of us, and the
+// sets of characters that can occur are [any, [f, o], [o]]. We find a range in
+// the lookahead info where the set of characters is reasonably constrained. In
+// our example this is from index 1 to 2 (0 is not constrained). We can now
+// look 3 characters ahead and if we don't find one of [f, o] (the union of
+// [f, o] and [o]) then we can skip forwards by the range size (in this case 2).
+//
+// For Unicode input strings we do the same, but modulo 128.
+//
+// We also look at the first string fed to the regexp and use that to get a hint
+// of the character frequencies in the inputs. This affects the assessment of
+// whether the set of characters is 'reasonably constrained'.
+//
+// We also have another lookahead mechanism (called quick check in the code),
+// which uses a wide load of multiple characters followed by a mask and compare
+// to determine whether a match is possible at this point.
+enum ContainedInLattice {
+ kNotYet = 0,
+ kLatticeIn = 1,
+ kLatticeOut = 2,
+ kLatticeUnknown = 3 // Can also mean both in and out.
+};
+
+inline ContainedInLattice Combine(ContainedInLattice a, ContainedInLattice b) {
+ return static_cast<ContainedInLattice>(a | b);
+}
+
+class BoyerMoorePositionInfo : public ZoneObject {
+ public:
+ bool at(int i) const { return map_[i]; }
+
+ static constexpr int kMapSize = 128;
+ static constexpr int kMask = kMapSize - 1;
+
+ int map_count() const { return map_count_; }
+
+ void Set(int character);
+ void SetInterval(const Interval& interval);
+ void SetAll();
+
+ bool is_non_word() { return w_ == kLatticeOut; }
+ bool is_word() { return w_ == kLatticeIn; }
+
+ using Bitset = std::bitset<kMapSize>;
+ Bitset raw_bitset() const { return map_; }
+
+ private:
+ Bitset map_;
+ int map_count_ = 0; // Number of set bits in the map.
+ ContainedInLattice w_ = kNotYet; // The \w character class.
+};
+
+class BoyerMooreLookahead : public ZoneObject {
+ public:
+ BoyerMooreLookahead(int length, RegExpCompiler* compiler, Zone* zone);
+
+ int length() { return length_; }
+ int max_char() { return max_char_; }
+ RegExpCompiler* compiler() { return compiler_; }
+
+ int Count(int map_number) { return bitmaps_->at(map_number)->map_count(); }
+
+ BoyerMoorePositionInfo* at(int i) { return bitmaps_->at(i); }
+
+ void Set(int map_number, int character) {
+ if (character > max_char_) return;
+ BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
+ info->Set(character);
+ }
+
+ void SetInterval(int map_number, const Interval& interval) {
+ if (interval.from() > max_char_) return;
+ BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
+ if (interval.to() > max_char_) {
+ info->SetInterval(Interval(interval.from(), max_char_));
+ } else {
+ info->SetInterval(interval);
+ }
+ }
+
+ void SetAll(int map_number) { bitmaps_->at(map_number)->SetAll(); }
+
+ void SetRest(int from_map) {
+ for (int i = from_map; i < length_; i++) SetAll(i);
+ }
+ void EmitSkipInstructions(RegExpMacroAssembler* masm);
+
+ private:
+ // This is the value obtained by EatsAtLeast. If we do not have at least this
+ // many characters left in the sample string then the match is bound to fail.
+ // Therefore it is OK to read a character this far ahead of the current match
+ // point.
+ int length_;
+ RegExpCompiler* compiler_;
+ // 0xff for Latin1, 0xffff for UTF-16.
+ int max_char_;
+ ZoneList<BoyerMoorePositionInfo*>* bitmaps_;
+
+ int GetSkipTable(int min_lookahead, int max_lookahead,
+ Handle<ByteArray> boolean_skip_table);
+ bool FindWorthwhileInterval(int* from, int* to);
+ int FindBestInterval(int max_number_of_chars, int old_biggest_points,
+ int* from, int* to);
+};
+
+// There are many ways to generate code for a node. This class encapsulates
+// the current way we should be generating. In other words it encapsulates
+// the current state of the code generator. The effect of this is that we
+// generate code for paths that the matcher can take through the regular
+// expression. A given node in the regexp can be code-generated several times
+// as it can be part of several traces. For example for the regexp:
+// /foo(bar|ip)baz/ the code to match baz will be generated twice, once as part
+// of the foo-bar-baz trace and once as part of the foo-ip-baz trace. The code
+// to match foo is generated only once (the traces have a common prefix). The
+// code to store the capture is deferred and generated (twice) after the places
+// where baz has been matched.
+class Trace {
+ public:
+ // A value for a property that is either known to be true, known to be false,
+ // or not known.
+ enum TriBool { UNKNOWN = -1, FALSE_VALUE = 0, TRUE_VALUE = 1 };
+
+ class DeferredAction {
+ public:
+ DeferredAction(ActionNode::ActionType action_type, int reg)
+ : action_type_(action_type), reg_(reg), next_(nullptr) {}
+ DeferredAction* next() { return next_; }
+ bool Mentions(int reg);
+ int reg() { return reg_; }
+ ActionNode::ActionType action_type() { return action_type_; }
+
+ private:
+ ActionNode::ActionType action_type_;
+ int reg_;
+ DeferredAction* next_;
+ friend class Trace;
+ };
+
+ class DeferredCapture : public DeferredAction {
+ public:
+ DeferredCapture(int reg, bool is_capture, Trace* trace)
+ : DeferredAction(ActionNode::STORE_POSITION, reg),
+ cp_offset_(trace->cp_offset()),
+ is_capture_(is_capture) {}
+ int cp_offset() { return cp_offset_; }
+ bool is_capture() { return is_capture_; }
+
+ private:
+ int cp_offset_;
+ bool is_capture_;
+ void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
+ };
+
+ class DeferredSetRegister : public DeferredAction {
+ public:
+ DeferredSetRegister(int reg, int value)
+ : DeferredAction(ActionNode::SET_REGISTER, reg), value_(value) {}
+ int value() { return value_; }
+
+ private:
+ int value_;
+ };
+
+ class DeferredClearCaptures : public DeferredAction {
+ public:
+ explicit DeferredClearCaptures(Interval range)
+ : DeferredAction(ActionNode::CLEAR_CAPTURES, -1), range_(range) {}
+ Interval range() { return range_; }
+
+ private:
+ Interval range_;
+ };
+
+ class DeferredIncrementRegister : public DeferredAction {
+ public:
+ explicit DeferredIncrementRegister(int reg)
+ : DeferredAction(ActionNode::INCREMENT_REGISTER, reg) {}
+ };
+
+ Trace()
+ : cp_offset_(0),
+ actions_(nullptr),
+ backtrack_(nullptr),
+ stop_node_(nullptr),
+ loop_label_(nullptr),
+ characters_preloaded_(0),
+ bound_checked_up_to_(0),
+ flush_budget_(100),
+ at_start_(UNKNOWN) {}
+
+ // End the trace. This involves flushing the deferred actions in the trace
+ // and pushing a backtrack location onto the backtrack stack. Once this is
+ // done we can start a new trace or go to one that has already been
+ // generated.
+ void Flush(RegExpCompiler* compiler, RegExpNode* successor);
+ int cp_offset() { return cp_offset_; }
+ DeferredAction* actions() { return actions_; }
+ // A trivial trace is one that has no deferred actions or other state that
+ // affects the assumptions used when generating code. There is no recorded
+ // backtrack location in a trivial trace, so with a trivial trace we will
+ // generate code that, on a failure to match, gets the backtrack location
+ // from the backtrack stack rather than using a direct jump instruction. We
+ // always start code generation with a trivial trace and non-trivial traces
+ // are created as we emit code for nodes or add to the list of deferred
+ // actions in the trace. The location of the code generated for a node using
+ // a trivial trace is recorded in a label in the node so that gotos can be
+ // generated to that code.
+ bool is_trivial() {
+ return backtrack_ == nullptr && actions_ == nullptr && cp_offset_ == 0 &&
+ characters_preloaded_ == 0 && bound_checked_up_to_ == 0 &&
+ quick_check_performed_.characters() == 0 && at_start_ == UNKNOWN;
+ }
+ TriBool at_start() { return at_start_; }
+ void set_at_start(TriBool at_start) { at_start_ = at_start; }
+ Label* backtrack() { return backtrack_; }
+ Label* loop_label() { return loop_label_; }
+ RegExpNode* stop_node() { return stop_node_; }
+ int characters_preloaded() { return characters_preloaded_; }
+ int bound_checked_up_to() { return bound_checked_up_to_; }
+ int flush_budget() { return flush_budget_; }
+ QuickCheckDetails* quick_check_performed() { return &quick_check_performed_; }
+ bool mentions_reg(int reg);
+ // Returns true if a deferred position store exists to the specified
+ // register and stores the offset in the out-parameter. Otherwise
+ // returns false.
+ bool GetStoredPosition(int reg, int* cp_offset);
+ // These set methods and AdvanceCurrentPositionInTrace should be used only on
+ // new traces - the intention is that traces are immutable after creation.
+ void add_action(DeferredAction* new_action) {
+ DCHECK(new_action->next_ == nullptr);
+ new_action->next_ = actions_;
+ actions_ = new_action;
+ }
+ void set_backtrack(Label* backtrack) { backtrack_ = backtrack; }
+ void set_stop_node(RegExpNode* node) { stop_node_ = node; }
+ void set_loop_label(Label* label) { loop_label_ = label; }
+ void set_characters_preloaded(int count) { characters_preloaded_ = count; }
+ void set_bound_checked_up_to(int to) { bound_checked_up_to_ = to; }
+ void set_flush_budget(int to) { flush_budget_ = to; }
+ void set_quick_check_performed(QuickCheckDetails* d) {
+ quick_check_performed_ = *d;
+ }
+ void InvalidateCurrentCharacter();
+ void AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler);
+
+ private:
+ int FindAffectedRegisters(DynamicBitSet* affected_registers, Zone* zone);
+ void PerformDeferredActions(RegExpMacroAssembler* macro, int max_register,
+ const DynamicBitSet& affected_registers,
+ DynamicBitSet* registers_to_pop,
+ DynamicBitSet* registers_to_clear, Zone* zone);
+ void RestoreAffectedRegisters(RegExpMacroAssembler* macro, int max_register,
+ const DynamicBitSet& registers_to_pop,
+ const DynamicBitSet& registers_to_clear);
+ int cp_offset_;
+ DeferredAction* actions_;
+ Label* backtrack_;
+ RegExpNode* stop_node_;
+ Label* loop_label_;
+ int characters_preloaded_;
+ int bound_checked_up_to_;
+ QuickCheckDetails quick_check_performed_;
+ int flush_budget_;
+ TriBool at_start_;
+};
+
+class GreedyLoopState {
+ public:
+ explicit GreedyLoopState(bool not_at_start);
+
+ Label* label() { return &label_; }
+ Trace* counter_backtrack_trace() { return &counter_backtrack_trace_; }
+
+ private:
+ Label label_;
+ Trace counter_backtrack_trace_;
+};
+
+struct PreloadState {
+ static const int kEatsAtLeastNotYetInitialized = -1;
+ bool preload_is_current_;
+ bool preload_has_checked_bounds_;
+ int preload_characters_;
+ int eats_at_least_;
+ void init() { eats_at_least_ = kEatsAtLeastNotYetInitialized; }
+};
+
+// Assertion propagation moves information about assertions such as
+// \b to the affected nodes. For instance, in /.\b./ the first '.' must be
+// told that whatever follows needs to know whether it matched a word or a
+// non-word character, and the second '.' must be told that it has to check
+// whether it follows a word or a non-word character. In this case the
+// result will be something like:
+//
+// +-------+ +------------+
+// | . | | . |
+// +-------+ ---> +------------+
+// | word? | | check word |
+// +-------+ +------------+
+class Analysis : public NodeVisitor {
+ public:
+ Analysis(Isolate* isolate, bool is_one_byte)
+ : isolate_(isolate), is_one_byte_(is_one_byte), error_message_(nullptr) {}
+ void EnsureAnalyzed(RegExpNode* node);
+
+#define DECLARE_VISIT(Type) void Visit##Type(Type##Node* that) override;
+ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#undef DECLARE_VISIT
+ void VisitLoopChoice(LoopChoiceNode* that) override;
+
+ bool has_failed() { return error_message_ != nullptr; }
+ const char* error_message() {
+ DCHECK(error_message_ != nullptr);
+ return error_message_;
+ }
+ void fail(const char* error_message) { error_message_ = error_message; }
+
+ Isolate* isolate() const { return isolate_; }
+
+ private:
+ Isolate* isolate_;
+ bool is_one_byte_;
+ const char* error_message_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
+};
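
A hedged sketch of how the analysis pass is driven before code generation; the surrounding driver and the ReportError call are assumptions, not part of this change.

// Illustrative only.
Analysis analysis(isolate, is_one_byte);
analysis.EnsureAnalyzed(start_node);
if (analysis.has_failed()) {
  return ReportError(analysis.error_message());  // hypothetical error path
}
// Otherwise the node graph is ready for code generation.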
+
+class FrequencyCollator {
+ public:
+ FrequencyCollator() : total_samples_(0) {
+ for (int i = 0; i < RegExpMacroAssembler::kTableSize; i++) {
+ frequencies_[i] = CharacterFrequency(i);
+ }
+ }
+
+ void CountCharacter(int character) {
+ int index = (character & RegExpMacroAssembler::kTableMask);
+ frequencies_[index].Increment();
+ total_samples_++;
+ }
+
+ // Does not measure in percent, but rather per-128 (the table size from the
+ // regexp macro assembler).
+ int Frequency(int in_character) {
+ DCHECK((in_character & RegExpMacroAssembler::kTableMask) == in_character);
+ if (total_samples_ < 1) return 1; // Division by zero.
+ int freq_in_per128 =
+ (frequencies_[in_character].counter() * 128) / total_samples_;
+ return freq_in_per128;
+ }
+
+ private:
+ class CharacterFrequency {
+ public:
+ CharacterFrequency() : counter_(0), character_(-1) {}
+ explicit CharacterFrequency(int character)
+ : counter_(0), character_(character) {}
+
+ void Increment() { counter_++; }
+ int counter() { return counter_; }
+ int character() { return character_; }
+
+ private:
+ int counter_;
+ int character_;
+ };
+
+ private:
+ CharacterFrequency frequencies_[RegExpMacroAssembler::kTableSize];
+ int total_samples_;
+};
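
To make the per-128 scale concrete, a small worked example with made-up sample counts:

// Illustrative only: after twelve samples, three of which were 'a',
// Frequency reports 3 * 128 / 12 = 32, i.e. 'a' accounted for 32/128 of the
// sampled characters.
FrequencyCollator collator;
for (int i = 0; i < 12; i++) {
  collator.CountCharacter(i < 3 ? 'a' : 'b');
}
int freq = collator.Frequency('a' & RegExpMacroAssembler::kTableMask);  // 32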
+
+class RegExpCompiler {
+ public:
+ RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
+ bool is_one_byte);
+
+ int AllocateRegister() {
+ if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
+ reg_exp_too_big_ = true;
+ return next_register_;
+ }
+ return next_register_++;
+ }
+
+ // Lookarounds to match lone surrogates for unicode character class matches
+ // are never nested. We can therefore reuse registers.
+ int UnicodeLookaroundStackRegister() {
+ if (unicode_lookaround_stack_register_ == kNoRegister) {
+ unicode_lookaround_stack_register_ = AllocateRegister();
+ }
+ return unicode_lookaround_stack_register_;
+ }
+
+ int UnicodeLookaroundPositionRegister() {
+ if (unicode_lookaround_position_register_ == kNoRegister) {
+ unicode_lookaround_position_register_ = AllocateRegister();
+ }
+ return unicode_lookaround_position_register_;
+ }
+
+ struct CompilationResult final {
+ explicit CompilationResult(const char* error_message)
+ : error_message(error_message) {}
+ CompilationResult(Object code, int registers)
+ : code(code), num_registers(registers) {}
+
+ static CompilationResult RegExpTooBig() {
+ return CompilationResult("RegExp too big");
+ }
+
+ bool Succeeded() const { return error_message == nullptr; }
+
+ const char* const error_message = nullptr;
+ Object code;
+ int num_registers = 0;
+ };
+
+ CompilationResult Assemble(Isolate* isolate, RegExpMacroAssembler* assembler,
+ RegExpNode* start, int capture_count,
+ Handle<String> pattern);
+
+ // If the regexp matching starts within a surrogate pair, step back to the
+ // lead surrogate and start matching from there.
+ static RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpCompiler* compiler,
+ RegExpNode* on_success,
+ JSRegExp::Flags flags);
+
+ inline void AddWork(RegExpNode* node) {
+ if (!node->on_work_list() && !node->label()->is_bound()) {
+ node->set_on_work_list(true);
+ work_list_->push_back(node);
+ }
+ }
+
+ static const int kImplementationOffset = 0;
+ static const int kNumberOfRegistersOffset = 0;
+ static const int kCodeOffset = 1;
+
+ RegExpMacroAssembler* macro_assembler() { return macro_assembler_; }
+ EndNode* accept() { return accept_; }
+
+ static const int kMaxRecursion = 100;
+ inline int recursion_depth() { return recursion_depth_; }
+ inline void IncrementRecursionDepth() { recursion_depth_++; }
+ inline void DecrementRecursionDepth() { recursion_depth_--; }
+
+ void SetRegExpTooBig() { reg_exp_too_big_ = true; }
+
+ inline bool one_byte() { return one_byte_; }
+ inline bool optimize() { return optimize_; }
+ inline void set_optimize(bool value) { optimize_ = value; }
+ inline bool limiting_recursion() { return limiting_recursion_; }
+ inline void set_limiting_recursion(bool value) {
+ limiting_recursion_ = value;
+ }
+ bool read_backward() { return read_backward_; }
+ void set_read_backward(bool value) { read_backward_ = value; }
+ FrequencyCollator* frequency_collator() { return &frequency_collator_; }
+
+ int current_expansion_factor() { return current_expansion_factor_; }
+ void set_current_expansion_factor(int value) {
+ current_expansion_factor_ = value;
+ }
+
+ Isolate* isolate() const { return isolate_; }
+ Zone* zone() const { return zone_; }
+
+ static const int kNoRegister = -1;
+
+ private:
+ EndNode* accept_;
+ int next_register_;
+ int unicode_lookaround_stack_register_;
+ int unicode_lookaround_position_register_;
+ std::vector<RegExpNode*>* work_list_;
+ int recursion_depth_;
+ RegExpMacroAssembler* macro_assembler_;
+ bool one_byte_;
+ bool reg_exp_too_big_;
+ bool limiting_recursion_;
+ bool optimize_;
+ bool read_backward_;
+ int current_expansion_factor_;
+ FrequencyCollator frequency_collator_;
+ Isolate* isolate_;
+ Zone* zone_;
+};
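
A brief, hypothetical sketch of how a caller consumes CompilationResult; the real driver lives elsewhere in this patch, and ReportError is a placeholder.

// Illustrative only.
RegExpCompiler::CompilationResult result = compiler.Assemble(
    isolate, &macro_assembler, start_node, capture_count, pattern);
if (!result.Succeeded()) {
  // error_message is e.g. "RegExp too big" when register allocation overflows.
  return ReportError(result.error_message);  // hypothetical error path
}
// On success, result.code holds the generated code object and
// result.num_registers the number of registers the match needs.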
+
+// Categorizes character ranges into BMP, non-BMP, lead, and trail surrogates.
+class UnicodeRangeSplitter {
+ public:
+ V8_EXPORT_PRIVATE UnicodeRangeSplitter(ZoneList<CharacterRange>* base);
+
+ static constexpr int kInitialSize = 8;
+ using CharacterRangeVector = base::SmallVector<CharacterRange, kInitialSize>;
+
+ const CharacterRangeVector* bmp() const { return &bmp_; }
+ const CharacterRangeVector* lead_surrogates() const {
+ return &lead_surrogates_;
+ }
+ const CharacterRangeVector* trail_surrogates() const {
+ return &trail_surrogates_;
+ }
+ const CharacterRangeVector* non_bmp() const { return &non_bmp_; }
+
+ private:
+ void AddRange(CharacterRange range);
+
+ CharacterRangeVector bmp_;
+ CharacterRangeVector lead_surrogates_;
+ CharacterRangeVector trail_surrogates_;
+ CharacterRangeVector non_bmp_;
+};
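
To illustrate the four buckets, a hedged example using the standard UTF-16 boundaries (lead surrogates 0xD800-0xDBFF, trail surrogates 0xDC00-0xDFFF, non-BMP above 0xFFFF); CharacterRange::Range is assumed to be the usual factory.

// Illustrative only: a single range spanning the whole character space
// contributes to all four buckets.
ZoneList<CharacterRange>* base = new (zone) ZoneList<CharacterRange>(1, zone);
base->Add(CharacterRange::Range(0x0041, 0x10FFFF), zone);
UnicodeRangeSplitter splitter(base);
// splitter.bmp()              -> the BMP parts outside the surrogate blocks
// splitter.lead_surrogates()  -> the 0xD800..0xDBFF part
// splitter.trail_surrogates() -> the 0xDC00..0xDFFF part
// splitter.non_bmp()          -> the 0x10000..0x10FFFF part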
+
+// We need to check for the following characters: 0x39C (Greek capital mu),
+// 0x3BC (Greek small mu) and 0x178 (Latin capital Y with diaeresis). They are
+// the case-insensitive equivalents of the Latin-1 characters 0xB5 (micro
+// sign) and 0xFF (small y with diaeresis).
+// TODO(jgruber): Move to CharacterRange.
+bool RangeContainsLatin1Equivalents(CharacterRange range);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_COMPILER_H_
diff --git a/deps/v8/src/regexp/regexp-dotprinter.cc b/deps/v8/src/regexp/regexp-dotprinter.cc
new file mode 100644
index 0000000000..a6d72aaf5b
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-dotprinter.cc
@@ -0,0 +1,244 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-dotprinter.h"
+
+#include "src/regexp/regexp-compiler.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------
+// Dot/dotty output
+
+#ifdef DEBUG
+
+class DotPrinterImpl : public NodeVisitor {
+ public:
+ explicit DotPrinterImpl(std::ostream& os) : os_(os) {}
+ void PrintNode(const char* label, RegExpNode* node);
+ void Visit(RegExpNode* node);
+ void PrintAttributes(RegExpNode* from);
+ void PrintOnFailure(RegExpNode* from, RegExpNode* to);
+#define DECLARE_VISIT(Type) virtual void Visit##Type(Type##Node* that);
+ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#undef DECLARE_VISIT
+ private:
+ std::ostream& os_;
+};
+
+void DotPrinterImpl::PrintNode(const char* label, RegExpNode* node) {
+ os_ << "digraph G {\n graph [label=\"";
+ for (int i = 0; label[i]; i++) {
+ switch (label[i]) {
+ case '\\':
+ os_ << "\\\\";
+ break;
+ case '"':
+ os_ << "\"";
+ break;
+ default:
+ os_ << label[i];
+ break;
+ }
+ }
+ os_ << "\"];\n";
+ Visit(node);
+ os_ << "}" << std::endl;
+}
+
+void DotPrinterImpl::Visit(RegExpNode* node) {
+ if (node->info()->visited) return;
+ node->info()->visited = true;
+ node->Accept(this);
+}
+
+void DotPrinterImpl::PrintOnFailure(RegExpNode* from, RegExpNode* on_failure) {
+ os_ << " n" << from << " -> n" << on_failure << " [style=dotted];\n";
+ Visit(on_failure);
+}
+
+class AttributePrinter {
+ public:
+ explicit AttributePrinter(std::ostream& os) // NOLINT
+ : os_(os), first_(true) {}
+ void PrintSeparator() {
+ if (first_) {
+ first_ = false;
+ } else {
+ os_ << "|";
+ }
+ }
+ void PrintBit(const char* name, bool value) {
+ if (!value) return;
+ PrintSeparator();
+ os_ << "{" << name << "}";
+ }
+ void PrintPositive(const char* name, int value) {
+ if (value < 0) return;
+ PrintSeparator();
+ os_ << "{" << name << "|" << value << "}";
+ }
+
+ private:
+ std::ostream& os_;
+ bool first_;
+};
+
+void DotPrinterImpl::PrintAttributes(RegExpNode* that) {
+ os_ << " a" << that << " [shape=Mrecord, color=grey, fontcolor=grey, "
+ << "margin=0.1, fontsize=10, label=\"{";
+ AttributePrinter printer(os_);
+ NodeInfo* info = that->info();
+ printer.PrintBit("NI", info->follows_newline_interest);
+ printer.PrintBit("WI", info->follows_word_interest);
+ printer.PrintBit("SI", info->follows_start_interest);
+ Label* label = that->label();
+ if (label->is_bound()) printer.PrintPositive("@", label->pos());
+ os_ << "}\"];\n"
+ << " a" << that << " -> n" << that
+ << " [style=dashed, color=grey, arrowhead=none];\n";
+}
+
+void DotPrinterImpl::VisitChoice(ChoiceNode* that) {
+ os_ << " n" << that << " [shape=Mrecord, label=\"?\"];\n";
+ for (int i = 0; i < that->alternatives()->length(); i++) {
+ GuardedAlternative alt = that->alternatives()->at(i);
+ os_ << " n" << that << " -> n" << alt.node();
+ }
+ for (int i = 0; i < that->alternatives()->length(); i++) {
+ GuardedAlternative alt = that->alternatives()->at(i);
+ alt.node()->Accept(this);
+ }
+}
+
+void DotPrinterImpl::VisitText(TextNode* that) {
+ Zone* zone = that->zone();
+ os_ << " n" << that << " [label=\"";
+ for (int i = 0; i < that->elements()->length(); i++) {
+ if (i > 0) os_ << " ";
+ TextElement elm = that->elements()->at(i);
+ switch (elm.text_type()) {
+ case TextElement::ATOM: {
+ Vector<const uc16> data = elm.atom()->data();
+ for (int i = 0; i < data.length(); i++) {
+ os_ << static_cast<char>(data[i]);
+ }
+ break;
+ }
+ case TextElement::CHAR_CLASS: {
+ RegExpCharacterClass* node = elm.char_class();
+ os_ << "[";
+ if (node->is_negated()) os_ << "^";
+ for (int j = 0; j < node->ranges(zone)->length(); j++) {
+ CharacterRange range = node->ranges(zone)->at(j);
+ os_ << AsUC16(range.from()) << "-" << AsUC16(range.to());
+ }
+ os_ << "]";
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ os_ << "\", shape=box, peripheries=2];\n";
+ PrintAttributes(that);
+ os_ << " n" << that << " -> n" << that->on_success() << ";\n";
+ Visit(that->on_success());
+}
+
+void DotPrinterImpl::VisitBackReference(BackReferenceNode* that) {
+ os_ << " n" << that << " [label=\"$" << that->start_register() << "..$"
+ << that->end_register() << "\", shape=doubleoctagon];\n";
+ PrintAttributes(that);
+ os_ << " n" << that << " -> n" << that->on_success() << ";\n";
+ Visit(that->on_success());
+}
+
+void DotPrinterImpl::VisitEnd(EndNode* that) {
+ os_ << " n" << that << " [style=bold, shape=point];\n";
+ PrintAttributes(that);
+}
+
+void DotPrinterImpl::VisitAssertion(AssertionNode* that) {
+ os_ << " n" << that << " [";
+ switch (that->assertion_type()) {
+ case AssertionNode::AT_END:
+ os_ << "label=\"$\", shape=septagon";
+ break;
+ case AssertionNode::AT_START:
+ os_ << "label=\"^\", shape=septagon";
+ break;
+ case AssertionNode::AT_BOUNDARY:
+ os_ << "label=\"\\b\", shape=septagon";
+ break;
+ case AssertionNode::AT_NON_BOUNDARY:
+ os_ << "label=\"\\B\", shape=septagon";
+ break;
+ case AssertionNode::AFTER_NEWLINE:
+ os_ << "label=\"(?<=\\n)\", shape=septagon";
+ break;
+ }
+ os_ << "];\n";
+ PrintAttributes(that);
+ RegExpNode* successor = that->on_success();
+ os_ << " n" << that << " -> n" << successor << ";\n";
+ Visit(successor);
+}
+
+void DotPrinterImpl::VisitAction(ActionNode* that) {
+ os_ << " n" << that << " [";
+ switch (that->action_type_) {
+ case ActionNode::SET_REGISTER:
+ os_ << "label=\"$" << that->data_.u_store_register.reg
+ << ":=" << that->data_.u_store_register.value << "\", shape=octagon";
+ break;
+ case ActionNode::INCREMENT_REGISTER:
+ os_ << "label=\"$" << that->data_.u_increment_register.reg
+ << "++\", shape=octagon";
+ break;
+ case ActionNode::STORE_POSITION:
+ os_ << "label=\"$" << that->data_.u_position_register.reg
+ << ":=$pos\", shape=octagon";
+ break;
+ case ActionNode::BEGIN_SUBMATCH:
+ os_ << "label=\"$" << that->data_.u_submatch.current_position_register
+ << ":=$pos,begin\", shape=septagon";
+ break;
+ case ActionNode::POSITIVE_SUBMATCH_SUCCESS:
+ os_ << "label=\"escape\", shape=septagon";
+ break;
+ case ActionNode::EMPTY_MATCH_CHECK:
+ os_ << "label=\"$" << that->data_.u_empty_match_check.start_register
+ << "=$pos?,$" << that->data_.u_empty_match_check.repetition_register
+ << "<" << that->data_.u_empty_match_check.repetition_limit
+ << "?\", shape=septagon";
+ break;
+ case ActionNode::CLEAR_CAPTURES: {
+ os_ << "label=\"clear $" << that->data_.u_clear_captures.range_from
+ << " to $" << that->data_.u_clear_captures.range_to
+ << "\", shape=septagon";
+ break;
+ }
+ }
+ os_ << "];\n";
+ PrintAttributes(that);
+ RegExpNode* successor = that->on_success();
+ os_ << " n" << that << " -> n" << successor << ";\n";
+ Visit(successor);
+}
+
+#endif // DEBUG
+
+void DotPrinter::DotPrint(const char* label, RegExpNode* node) {
+#ifdef DEBUG
+ StdoutStream os;
+ DotPrinterImpl printer(os);
+ printer.PrintNode(label, node);
+#endif // DEBUG
+}
+
+} // namespace internal
+} // namespace v8
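
As a usage note, a hedged sketch; only DotPrinter::DotPrint itself is real API (declared in the header below), the wrapper is illustrative.

// Illustrative only. In release builds DotPrint compiles to a no-op, as shown
// above; in DEBUG builds it writes a Graphviz "digraph G { ... }" description
// of the node graph to stdout, with dotted edges marking failure paths.
void MaybeDumpGraph(const char* pattern, RegExpNode* start) {
#ifdef DEBUG
  DotPrinter::DotPrint(pattern, start);  // pattern only serves as the graph label
#endif
}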
diff --git a/deps/v8/src/regexp/regexp-dotprinter.h b/deps/v8/src/regexp/regexp-dotprinter.h
new file mode 100644
index 0000000000..d9c75fc1f2
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-dotprinter.h
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_DOTPRINTER_H_
+#define V8_REGEXP_REGEXP_DOTPRINTER_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class RegExpNode;
+
+class DotPrinter final : public AllStatic {
+ public:
+ static void DotPrint(const char* label, RegExpNode* node);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_DOTPRINTER_H_
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index 04bb63ee7a..881758861c 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -4,13 +4,14 @@
// A simple interpreter for the Irregexp byte code.
-#include "src/regexp/interpreter-irregexp.h"
+#include "src/regexp/regexp-interpreter.h"
#include "src/ast/ast.h"
+#include "src/base/small-vector.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/bytecodes-irregexp.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp.h"
#include "src/strings/unicode.h"
#include "src/utils/utils.h"
@@ -33,7 +34,6 @@ static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
offset_a, offset_b, length, unicode ? nullptr : isolate) == 1;
}
-
static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
int len, Vector<const uint8_t> subject,
bool unicode) {
@@ -55,28 +55,19 @@ static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
return true;
}
-
#ifdef DEBUG
-static void TraceInterpreter(const byte* code_base,
- const byte* pc,
- int stack_depth,
- int current_position,
- uint32_t current_char,
- int bytecode_length,
+static void TraceInterpreter(const byte* code_base, const byte* pc,
+ int stack_depth, int current_position,
+ uint32_t current_char, int bytecode_length,
const char* bytecode_name) {
if (FLAG_trace_regexp_bytecodes) {
bool printable = (current_char < 127 && current_char >= 32);
const char* format =
- printable ?
- "pc = %02x, sp = %d, curpos = %d, curchar = %08x (%c), bc = %s" :
- "pc = %02x, sp = %d, curpos = %d, curchar = %08x .%c., bc = %s";
- PrintF(format,
- pc - code_base,
- stack_depth,
- current_position,
- current_char,
- printable ? current_char : '.',
- bytecode_name);
+ printable
+ ? "pc = %02x, sp = %d, curpos = %d, curchar = %08x (%c), bc = %s"
+ : "pc = %02x, sp = %d, curpos = %d, curchar = %08x .%c., bc = %s";
+ PrintF(format, pc - code_base, stack_depth, current_position, current_char,
+ printable ? current_char : '.', bytecode_name);
for (int i = 0; i < bytecode_length; i++) {
printf(", %02x", pc[i]);
}
@@ -93,54 +84,57 @@ static void TraceInterpreter(const byte* code_base,
}
}
-
-#define BYTECODE(name) \
- case BC_##name: \
- TraceInterpreter(code_base, \
- pc, \
- static_cast<int>(backtrack_sp - backtrack_stack_base), \
- current, \
- current_char, \
- BC_##name##_LENGTH, \
- #name);
+#define BYTECODE(name) \
+ case BC_##name: \
+ TraceInterpreter(code_base, pc, backtrack_stack.sp(), current, \
+ current_char, BC_##name##_LENGTH, #name);
#else
-#define BYTECODE(name) \
- case BC_##name:
+#define BYTECODE(name) case BC_##name:
#endif
-
static int32_t Load32Aligned(const byte* pc) {
DCHECK_EQ(0, reinterpret_cast<intptr_t>(pc) & 3);
- return *reinterpret_cast<const int32_t *>(pc);
+ return *reinterpret_cast<const int32_t*>(pc);
}
-
static int32_t Load16Aligned(const byte* pc) {
DCHECK_EQ(0, reinterpret_cast<intptr_t>(pc) & 1);
- return *reinterpret_cast<const uint16_t *>(pc);
+ return *reinterpret_cast<const uint16_t*>(pc);
}
-
// A simple abstraction over the backtracking stack used by the interpreter.
-// This backtracking stack does not grow automatically, but it ensures that the
-// the memory held by the stack is released or remembered in a cache if the
-// matching terminates.
+//
+// Despite the name 'backtracking' stack, it's actually used as a generic stack
+// that stores both program counters (= offsets into the bytecode) and generic
+// integer values.
class BacktrackStack {
public:
- BacktrackStack() { data_ = NewArray<int>(kBacktrackStackSize); }
+ BacktrackStack() = default;
- ~BacktrackStack() {
- DeleteArray(data_);
+ void push(int v) { data_.emplace_back(v); }
+ int peek() const {
+ DCHECK(!data_.empty());
+ return data_.back();
+ }
+ int pop() {
+ int v = peek();
+ data_.pop_back();
+ return v;
}
- int* data() const { return data_; }
-
- int max_size() const { return kBacktrackStackSize; }
+ // The 'sp' is the index of the first empty element in the stack.
+ int sp() const { return static_cast<int>(data_.size()); }
+ void set_sp(int new_sp) {
+ DCHECK_LE(new_sp, sp());
+ data_.resize_no_init(new_sp);
+ }
private:
- static const int kBacktrackStackSize = 10000;
+ // Semi-arbitrary. Should be large enough for common cases to remain in the
+ // static stack-allocated backing store, but small enough not to waste space.
+ static constexpr int kStaticCapacity = 64;
- int* data_;
+ base::SmallVector<int, kStaticCapacity> data_;
DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
@@ -163,28 +157,30 @@ IrregexpInterpreter::Result HandleInterrupts(Isolate* isolate,
StackLimitCheck check(isolate);
if (check.JsHasOverflowed()) {
- // A real stack overflow.
- return StackOverflow(isolate);
+ return StackOverflow(isolate); // A real stack overflow.
}
- const bool was_one_byte =
- String::IsOneByteRepresentationUnderneath(*subject_string);
+ // Handle interrupts if any exist.
+ if (check.InterruptRequested()) {
+ const bool was_one_byte =
+ String::IsOneByteRepresentationUnderneath(*subject_string);
- Object result;
- {
- AllowHeapAllocation yes_gc;
- result = isolate->stack_guard()->HandleInterrupts();
- }
+ Object result;
+ {
+ AllowHeapAllocation yes_gc;
+ result = isolate->stack_guard()->HandleInterrupts();
+ }
- if (result.IsException(isolate)) {
- return IrregexpInterpreter::EXCEPTION;
- }
+ if (result.IsException(isolate)) {
+ return IrregexpInterpreter::EXCEPTION;
+ }
- // If we changed between a LATIN1 and a UC16 string, we need to restart
- // regexp matching with the appropriate template instantiation of RawMatch.
- if (String::IsOneByteRepresentationUnderneath(*subject_string) !=
- was_one_byte) {
- return IrregexpInterpreter::RETRY;
+ // If we changed between a LATIN1 and a UC16 string, we need to restart
+ // regexp matching with the appropriate template instantiation of RawMatch.
+ if (String::IsOneByteRepresentationUnderneath(*subject_string) !=
+ was_one_byte) {
+ return IrregexpInterpreter::RETRY;
+ }
}
return IrregexpInterpreter::SUCCESS;
@@ -221,121 +217,108 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
const byte* pc = code_array->GetDataStartAddress();
const byte* code_base = pc;
- // BacktrackStack ensures that the memory allocated for the backtracking stack
- // is returned to the system or cached if there is no stack being cached at
- // the moment.
BacktrackStack backtrack_stack;
- int* backtrack_stack_base = backtrack_stack.data();
- int* backtrack_sp = backtrack_stack_base;
- int backtrack_stack_space = backtrack_stack.max_size();
+
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes) {
PrintF("\n\nStart bytecode interpreter\n\n");
}
#endif
while (true) {
- int32_t insn = Load32Aligned(pc);
+ const int32_t insn = Load32Aligned(pc);
switch (insn & BYTECODE_MASK) {
- BYTECODE(BREAK)
- UNREACHABLE();
- BYTECODE(PUSH_CP)
- if (--backtrack_stack_space < 0) {
- return StackOverflow(isolate);
- }
- *backtrack_sp++ = current;
+ BYTECODE(BREAK) { UNREACHABLE(); }
+ BYTECODE(PUSH_CP) {
+ backtrack_stack.push(current);
pc += BC_PUSH_CP_LENGTH;
break;
- BYTECODE(PUSH_BT)
- if (--backtrack_stack_space < 0) {
- return StackOverflow(isolate);
- }
- *backtrack_sp++ = Load32Aligned(pc + 4);
+ }
+ BYTECODE(PUSH_BT) {
+ backtrack_stack.push(Load32Aligned(pc + 4));
pc += BC_PUSH_BT_LENGTH;
break;
- BYTECODE(PUSH_REGISTER)
- if (--backtrack_stack_space < 0) {
- return StackOverflow(isolate);
- }
- *backtrack_sp++ = registers[insn >> BYTECODE_SHIFT];
+ }
+ BYTECODE(PUSH_REGISTER) {
+ backtrack_stack.push(registers[insn >> BYTECODE_SHIFT]);
pc += BC_PUSH_REGISTER_LENGTH;
break;
- BYTECODE(SET_REGISTER)
+ }
+ BYTECODE(SET_REGISTER) {
registers[insn >> BYTECODE_SHIFT] = Load32Aligned(pc + 4);
pc += BC_SET_REGISTER_LENGTH;
break;
- BYTECODE(ADVANCE_REGISTER)
+ }
+ BYTECODE(ADVANCE_REGISTER) {
registers[insn >> BYTECODE_SHIFT] += Load32Aligned(pc + 4);
pc += BC_ADVANCE_REGISTER_LENGTH;
break;
- BYTECODE(SET_REGISTER_TO_CP)
+ }
+ BYTECODE(SET_REGISTER_TO_CP) {
registers[insn >> BYTECODE_SHIFT] = current + Load32Aligned(pc + 4);
pc += BC_SET_REGISTER_TO_CP_LENGTH;
break;
- BYTECODE(SET_CP_TO_REGISTER)
+ }
+ BYTECODE(SET_CP_TO_REGISTER) {
current = registers[insn >> BYTECODE_SHIFT];
pc += BC_SET_CP_TO_REGISTER_LENGTH;
break;
- BYTECODE(SET_REGISTER_TO_SP)
- registers[insn >> BYTECODE_SHIFT] =
- static_cast<int>(backtrack_sp - backtrack_stack_base);
+ }
+ BYTECODE(SET_REGISTER_TO_SP) {
+ registers[insn >> BYTECODE_SHIFT] = backtrack_stack.sp();
pc += BC_SET_REGISTER_TO_SP_LENGTH;
break;
- BYTECODE(SET_SP_TO_REGISTER)
- backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT];
- backtrack_stack_space = backtrack_stack.max_size() -
- static_cast<int>(backtrack_sp - backtrack_stack_base);
+ }
+ BYTECODE(SET_SP_TO_REGISTER) {
+ backtrack_stack.set_sp(registers[insn >> BYTECODE_SHIFT]);
pc += BC_SET_SP_TO_REGISTER_LENGTH;
break;
- BYTECODE(POP_CP)
- backtrack_stack_space++;
- --backtrack_sp;
- current = *backtrack_sp;
+ }
+ BYTECODE(POP_CP) {
+ current = backtrack_stack.pop();
pc += BC_POP_CP_LENGTH;
break;
- // clang-format off
+ }
BYTECODE(POP_BT) {
- IrregexpInterpreter::Result return_code = HandleInterrupts(
- isolate, subject_string);
+ IrregexpInterpreter::Result return_code =
+ HandleInterrupts(isolate, subject_string);
if (return_code != IrregexpInterpreter::SUCCESS) return return_code;
UpdateCodeAndSubjectReferences(isolate, code_array, subject_string,
- &code_base, &pc, &subject);
+ &code_base, &pc, &subject);
- backtrack_stack_space++;
- --backtrack_sp;
- pc = code_base + *backtrack_sp;
+ pc = code_base + backtrack_stack.pop();
break;
}
- BYTECODE(POP_REGISTER) // clang-format on
- backtrack_stack_space++;
- --backtrack_sp;
- registers[insn >> BYTECODE_SHIFT] = *backtrack_sp;
+ BYTECODE(POP_REGISTER) {
+ registers[insn >> BYTECODE_SHIFT] = backtrack_stack.pop();
pc += BC_POP_REGISTER_LENGTH;
break;
- BYTECODE(FAIL)
- return IrregexpInterpreter::FAILURE;
- BYTECODE(SUCCEED)
- return IrregexpInterpreter::SUCCESS;
- BYTECODE(ADVANCE_CP)
+ }
+ BYTECODE(FAIL) { return IrregexpInterpreter::FAILURE; }
+ BYTECODE(SUCCEED) { return IrregexpInterpreter::SUCCESS; }
+ BYTECODE(ADVANCE_CP) {
current += insn >> BYTECODE_SHIFT;
pc += BC_ADVANCE_CP_LENGTH;
break;
- BYTECODE(GOTO)
+ }
+ BYTECODE(GOTO) {
pc = code_base + Load32Aligned(pc + 4);
break;
- BYTECODE(ADVANCE_CP_AND_GOTO)
+ }
+ BYTECODE(ADVANCE_CP_AND_GOTO) {
current += insn >> BYTECODE_SHIFT;
pc = code_base + Load32Aligned(pc + 4);
break;
- BYTECODE(CHECK_GREEDY)
- if (current == backtrack_sp[-1]) {
- backtrack_sp--;
- backtrack_stack_space++;
+ }
+ BYTECODE(CHECK_GREEDY) {
+ if (current == backtrack_stack.peek()) {
+ backtrack_stack.pop();
pc = code_base + Load32Aligned(pc + 4);
} else {
pc += BC_CHECK_GREEDY_LENGTH;
}
break;
+ }
BYTECODE(LOAD_CURRENT_CHAR) {
int pos = current + (insn >> BYTECODE_SHIFT);
if (pos >= subject.length() || pos < 0) {
@@ -380,10 +363,8 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
Char next1 = subject[pos + 1];
Char next2 = subject[pos + 2];
Char next3 = subject[pos + 3];
- current_char = (subject[pos] |
- (next1 << 8) |
- (next2 << 16) |
- (next3 << 24));
+ current_char =
+ (subject[pos] | (next1 << 8) | (next2 << 16) | (next3 << 24));
pc += BC_LOAD_4_CURRENT_CHARS_LENGTH;
}
break;
@@ -394,10 +375,8 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
Char next1 = subject[pos + 1];
Char next2 = subject[pos + 2];
Char next3 = subject[pos + 3];
- current_char = (subject[pos] |
- (next1 << 8) |
- (next2 << 16) |
- (next3 << 24));
+ current_char =
+ (subject[pos] | (next1 << 8) | (next2 << 16) | (next3 << 24));
pc += BC_LOAD_4_CURRENT_CHARS_UNCHECKED_LENGTH;
break;
}
@@ -533,28 +512,31 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
}
break;
}
- BYTECODE(CHECK_REGISTER_LT)
+ BYTECODE(CHECK_REGISTER_LT) {
if (registers[insn >> BYTECODE_SHIFT] < Load32Aligned(pc + 4)) {
pc = code_base + Load32Aligned(pc + 8);
} else {
pc += BC_CHECK_REGISTER_LT_LENGTH;
}
break;
- BYTECODE(CHECK_REGISTER_GE)
+ }
+ BYTECODE(CHECK_REGISTER_GE) {
if (registers[insn >> BYTECODE_SHIFT] >= Load32Aligned(pc + 4)) {
pc = code_base + Load32Aligned(pc + 8);
} else {
pc += BC_CHECK_REGISTER_GE_LENGTH;
}
break;
- BYTECODE(CHECK_REGISTER_EQ_POS)
+ }
+ BYTECODE(CHECK_REGISTER_EQ_POS) {
if (registers[insn >> BYTECODE_SHIFT] == current) {
pc = code_base + Load32Aligned(pc + 4);
} else {
pc += BC_CHECK_REGISTER_EQ_POS_LENGTH;
}
break;
- BYTECODE(CHECK_NOT_REGS_EQUAL)
+ }
+ BYTECODE(CHECK_NOT_REGS_EQUAL) {
if (registers[insn >> BYTECODE_SHIFT] ==
registers[Load32Aligned(pc + 4)]) {
pc += BC_CHECK_NOT_REGS_EQUAL_LENGTH;
@@ -562,6 +544,7 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
pc = code_base + Load32Aligned(pc + 8);
}
break;
+ }
BYTECODE(CHECK_NOT_BACK_REF) {
int from = registers[insn >> BYTECODE_SHIFT];
int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
@@ -628,20 +611,22 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
pc += BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD_LENGTH;
break;
}
- BYTECODE(CHECK_AT_START)
+ BYTECODE(CHECK_AT_START) {
if (current == 0) {
pc = code_base + Load32Aligned(pc + 4);
} else {
pc += BC_CHECK_AT_START_LENGTH;
}
break;
- BYTECODE(CHECK_NOT_AT_START)
+ }
+ BYTECODE(CHECK_NOT_AT_START) {
if (current + (insn >> BYTECODE_SHIFT) == 0) {
pc += BC_CHECK_NOT_AT_START_LENGTH;
} else {
pc = code_base + Load32Aligned(pc + 4);
}
break;
+ }
BYTECODE(SET_CURRENT_POSITION_FROM_END) {
int by = static_cast<uint32_t>(insn) >> BYTECODE_SHIFT;
if (subject.length() - current > by) {
@@ -658,6 +643,8 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
}
}
+#undef BYTECODE
+
} // namespace
// static
diff --git a/deps/v8/src/regexp/interpreter-irregexp.h b/deps/v8/src/regexp/regexp-interpreter.h
index a57d40854e..ad27dcd296 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.h
+++ b/deps/v8/src/regexp/regexp-interpreter.h
@@ -4,20 +4,22 @@
// A simple interpreter for the Irregexp byte code.
-#ifndef V8_REGEXP_INTERPRETER_IRREGEXP_H_
-#define V8_REGEXP_INTERPRETER_IRREGEXP_H_
+#ifndef V8_REGEXP_REGEXP_INTERPRETER_H_
+#define V8_REGEXP_REGEXP_INTERPRETER_H_
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
class V8_EXPORT_PRIVATE IrregexpInterpreter {
public:
- enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
- STATIC_ASSERT(EXCEPTION == static_cast<int>(RegExpImpl::RE_EXCEPTION));
- STATIC_ASSERT(FAILURE == static_cast<int>(RegExpImpl::RE_FAILURE));
- STATIC_ASSERT(SUCCESS == static_cast<int>(RegExpImpl::RE_SUCCESS));
+ enum Result {
+ FAILURE = RegExp::kInternalRegExpFailure,
+ SUCCESS = RegExp::kInternalRegExpSuccess,
+ EXCEPTION = RegExp::kInternalRegExpException,
+ RETRY = RegExp::kInternalRegExpRetry,
+ };
// The caller is responsible for initializing registers before each call.
static Result Match(Isolate* isolate, Handle<ByteArray> code_array,
@@ -28,4 +30,4 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter {
} // namespace internal
} // namespace v8
-#endif // V8_REGEXP_INTERPRETER_IRREGEXP_H_
+#endif // V8_REGEXP_REGEXP_INTERPRETER_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-arch.h b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
new file mode 100644
index 0000000000..2dc6739e42
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_ARCH_H_
+#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_ARCH_H_
+
+#include "src/regexp/regexp-macro-assembler.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/regexp/x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/regexp/arm/regexp-macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/regexp/mips/regexp-macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/regexp/s390/regexp-macro-assembler-s390.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_ARCH_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index cfe827ef4e..68fa16db61 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -133,7 +133,8 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
Isolate* isolate, int start_index, bool is_direct_call,
Address* return_address, Code re_code, Address* subject,
const byte** input_start, const byte** input_end) {
- AllowHeapAllocation allow_allocation;
+ DisallowHeapAllocation no_gc;
+
DCHECK(re_code.raw_instruction_start() <= *return_address);
DCHECK(*return_address <= re_code.raw_instruction_end());
int return_value = 0;
@@ -154,15 +155,15 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
// forcing the call through the runtime system.
return_value = js_has_overflowed ? EXCEPTION : RETRY;
} else if (js_has_overflowed) {
+ AllowHeapAllocation yes_gc;
isolate->StackOverflow();
return_value = EXCEPTION;
- } else {
+ } else if (check.InterruptRequested()) {
+ AllowHeapAllocation yes_gc;
Object result = isolate->stack_guard()->HandleInterrupts();
if (result.IsException(isolate)) return_value = EXCEPTION;
}
- DisallowHeapAllocation no_gc;
-
if (*code_handle != re_code) { // Return address no longer valid
intptr_t delta = code_handle->address() - re_code.address();
// Overwrite the return address on the stack.
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 8626d1a19e..b55ac13590 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -7,6 +7,7 @@
#include "src/codegen/label.h"
#include "src/regexp/regexp-ast.h"
+#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
@@ -206,7 +207,12 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
// FAILURE: Matching failed.
// SUCCESS: Matching succeeded, and the output array has been filled with
// capture positions.
- enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
+ enum Result {
+ FAILURE = RegExp::kInternalRegExpFailure,
+ SUCCESS = RegExp::kInternalRegExpSuccess,
+ EXCEPTION = RegExp::kInternalRegExpException,
+ RETRY = RegExp::kInternalRegExpRetry,
+ };
NativeRegExpMacroAssembler(Isolate* isolate, Zone* zone);
~NativeRegExpMacroAssembler() override;
diff --git a/deps/v8/src/regexp/regexp-nodes.h b/deps/v8/src/regexp/regexp-nodes.h
new file mode 100644
index 0000000000..4c13b74926
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-nodes.h
@@ -0,0 +1,675 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_NODES_H_
+#define V8_REGEXP_REGEXP_NODES_H_
+
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class AlternativeGenerationList;
+class BoyerMooreLookahead;
+class GreedyLoopState;
+class Label;
+class NodeVisitor;
+class QuickCheckDetails;
+class RegExpCompiler;
+class Trace;
+struct PreloadState;
+
+#define FOR_EACH_NODE_TYPE(VISIT) \
+ VISIT(End) \
+ VISIT(Action) \
+ VISIT(Choice) \
+ VISIT(BackReference) \
+ VISIT(Assertion) \
+ VISIT(Text)
+
+struct NodeInfo final {
+ NodeInfo()
+ : being_analyzed(false),
+ been_analyzed(false),
+ follows_word_interest(false),
+ follows_newline_interest(false),
+ follows_start_interest(false),
+ at_end(false),
+ visited(false),
+ replacement_calculated(false) {}
+
+ // Returns true if the interests and assumptions of this node
+  // match the given one.
+ bool Matches(NodeInfo* that) {
+ return (at_end == that->at_end) &&
+ (follows_word_interest == that->follows_word_interest) &&
+ (follows_newline_interest == that->follows_newline_interest) &&
+ (follows_start_interest == that->follows_start_interest);
+ }
+
+ // Updates the interests of this node given the interests of the
+ // node preceding it.
+ void AddFromPreceding(NodeInfo* that) {
+ at_end |= that->at_end;
+ follows_word_interest |= that->follows_word_interest;
+ follows_newline_interest |= that->follows_newline_interest;
+ follows_start_interest |= that->follows_start_interest;
+ }
+
+ bool HasLookbehind() {
+ return follows_word_interest || follows_newline_interest ||
+ follows_start_interest;
+ }
+
+ // Sets the interests of this node to include the interests of the
+ // following node.
+ void AddFromFollowing(NodeInfo* that) {
+ follows_word_interest |= that->follows_word_interest;
+ follows_newline_interest |= that->follows_newline_interest;
+ follows_start_interest |= that->follows_start_interest;
+ }
+
+ void ResetCompilationState() {
+ being_analyzed = false;
+ been_analyzed = false;
+ }
+
+ bool being_analyzed : 1;
+ bool been_analyzed : 1;
+
+  // These bits are set if this node has to know what the preceding
+ // character was.
+ bool follows_word_interest : 1;
+ bool follows_newline_interest : 1;
+ bool follows_start_interest : 1;
+
+ bool at_end : 1;
+ bool visited : 1;
+ bool replacement_calculated : 1;
+};
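
A small hedged example of the bit-OR semantics used when interests propagate between adjacent nodes:

// Illustrative only.
NodeInfo a;
a.follows_word_interest = true;  // e.g. set because of a \b assertion
NodeInfo b;
b.AddFromPreceding(&a);
// b.follows_word_interest is now true, so b.HasLookbehind() returns true and
// b.Matches(&a) holds as long as the remaining bits agree.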
+
+class RegExpNode : public ZoneObject {
+ public:
+ explicit RegExpNode(Zone* zone)
+ : replacement_(nullptr),
+ on_work_list_(false),
+ trace_count_(0),
+ zone_(zone) {
+ bm_info_[0] = bm_info_[1] = nullptr;
+ }
+ virtual ~RegExpNode();
+ virtual void Accept(NodeVisitor* visitor) = 0;
+ // Generates a goto to this node or actually generates the code at this point.
+ virtual void Emit(RegExpCompiler* compiler, Trace* trace) = 0;
+ // How many characters must this node consume at a minimum in order to
+ // succeed. If we have found at least 'still_to_find' characters that
+ // must be consumed there is no need to ask any following nodes whether
+ // they are sure to eat any more characters. The not_at_start argument is
+ // used to indicate that we know we are not at the start of the input. In
+ // this case anchored branches will always fail and can be ignored when
+ // determining how many characters are consumed on success.
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start) = 0;
+ // Emits some quick code that checks whether the preloaded characters match.
+ // Falls through on certain failure, jumps to the label on possible success.
+ // If the node cannot make a quick check it does nothing and returns false.
+ bool EmitQuickCheck(RegExpCompiler* compiler, Trace* bounds_check_trace,
+ Trace* trace, bool preload_has_checked_bounds,
+ Label* on_possible_success,
+ QuickCheckDetails* details_return,
+ bool fall_through_on_failure);
+ // For a given number of characters this returns a mask and a value. The
+ // next n characters are anded with the mask and compared with the value.
+ // A comparison failure indicates the node cannot match the next n characters.
+ // A comparison success indicates the node may match.
+ virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler,
+ int characters_filled_in,
+ bool not_at_start) = 0;
+ static const int kNodeIsTooComplexForGreedyLoops = kMinInt;
+ virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
+ // Only returns the successor for a text node of length 1 that matches any
+ // character and that has no guards on it.
+ virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
+ RegExpCompiler* compiler) {
+ return nullptr;
+ }
+
+ // Collects information on the possible code units (mod 128) that can match if
+ // we look forward. This is used for a Boyer-Moore-like string searching
+ // implementation. TODO(erikcorry): This should share more code with
+ // EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
+ // the number of nodes we are willing to look at in order to create this data.
+ static const int kRecursionBudget = 200;
+ bool KeepRecursing(RegExpCompiler* compiler);
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ UNREACHABLE();
+ }
+
+ // If we know that the input is one-byte then there are some nodes that can
+ // never match. This method returns a node that can be substituted for
+ // itself, or nullptr if the node can never match.
+ virtual RegExpNode* FilterOneByte(int depth) { return this; }
+ // Helper for FilterOneByte.
+ RegExpNode* replacement() {
+ DCHECK(info()->replacement_calculated);
+ return replacement_;
+ }
+ RegExpNode* set_replacement(RegExpNode* replacement) {
+ info()->replacement_calculated = true;
+ replacement_ = replacement;
+ return replacement; // For convenience.
+ }
+
+ // We want to avoid recalculating the lookahead info, so we store it on the
+ // node. Only info that is for this node is stored. We can tell that the
+ // info is for this node when offset == 0, so the information is calculated
+ // relative to this node.
+ void SaveBMInfo(BoyerMooreLookahead* bm, bool not_at_start, int offset) {
+ if (offset == 0) set_bm_info(not_at_start, bm);
+ }
+
+ Label* label() { return &label_; }
+ // If non-generic code is generated for a node (i.e. the node is not at the
+ // start of the trace) then it cannot be reused. This variable sets a limit
+ // on how often we allow that to happen before we insist on starting a new
+ // trace and generating generic code for a node that can be reused by flushing
+ // the deferred actions in the current trace and generating a goto.
+ static const int kMaxCopiesCodeGenerated = 10;
+
+ bool on_work_list() { return on_work_list_; }
+ void set_on_work_list(bool value) { on_work_list_ = value; }
+
+ NodeInfo* info() { return &info_; }
+
+ BoyerMooreLookahead* bm_info(bool not_at_start) {
+ return bm_info_[not_at_start ? 1 : 0];
+ }
+
+ Zone* zone() const { return zone_; }
+
+ protected:
+ enum LimitResult { DONE, CONTINUE };
+ RegExpNode* replacement_;
+
+ LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
+
+ void set_bm_info(bool not_at_start, BoyerMooreLookahead* bm) {
+ bm_info_[not_at_start ? 1 : 0] = bm;
+ }
+
+ private:
+ static const int kFirstCharBudget = 10;
+ Label label_;
+ bool on_work_list_;
+ NodeInfo info_;
+ // This variable keeps track of how many times code has been generated for
+ // this node (in different traces). We don't keep track of where the
+ // generated code is located unless the code is generated at the start of
+ // a trace, in which case it is generic and can be reused by flushing the
+ // deferred operations in the current trace and generating a goto.
+ int trace_count_;
+ BoyerMooreLookahead* bm_info_[2];
+
+ Zone* zone_;
+};
+
+class SeqRegExpNode : public RegExpNode {
+ public:
+ explicit SeqRegExpNode(RegExpNode* on_success)
+ : RegExpNode(on_success->zone()), on_success_(on_success) {}
+ RegExpNode* on_success() { return on_success_; }
+ void set_on_success(RegExpNode* node) { on_success_ = node; }
+ RegExpNode* FilterOneByte(int depth) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override {
+ on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
+ if (offset == 0) set_bm_info(not_at_start, bm);
+ }
+
+ protected:
+ RegExpNode* FilterSuccessor(int depth);
+
+ private:
+ RegExpNode* on_success_;
+};
+
+class ActionNode : public SeqRegExpNode {
+ public:
+ enum ActionType {
+ SET_REGISTER,
+ INCREMENT_REGISTER,
+ STORE_POSITION,
+ BEGIN_SUBMATCH,
+ POSITIVE_SUBMATCH_SUCCESS,
+ EMPTY_MATCH_CHECK,
+ CLEAR_CAPTURES
+ };
+ static ActionNode* SetRegister(int reg, int val, RegExpNode* on_success);
+ static ActionNode* IncrementRegister(int reg, RegExpNode* on_success);
+ static ActionNode* StorePosition(int reg, bool is_capture,
+ RegExpNode* on_success);
+ static ActionNode* ClearCaptures(Interval range, RegExpNode* on_success);
+ static ActionNode* BeginSubmatch(int stack_pointer_reg, int position_reg,
+ RegExpNode* on_success);
+ static ActionNode* PositiveSubmatchSuccess(int stack_pointer_reg,
+ int restore_reg,
+ int clear_capture_count,
+ int clear_capture_from,
+ RegExpNode* on_success);
+ static ActionNode* EmptyMatchCheck(int start_register,
+ int repetition_register,
+ int repetition_limit,
+ RegExpNode* on_success);
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int filled_in,
+ bool not_at_start) override {
+ return on_success()->GetQuickCheckDetails(details, compiler, filled_in,
+ not_at_start);
+ }
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+ ActionType action_type() { return action_type_; }
+ // TODO(erikcorry): We should allow some action nodes in greedy loops.
+ int GreedyLoopTextLength() override {
+ return kNodeIsTooComplexForGreedyLoops;
+ }
+
+ private:
+ union {
+ struct {
+ int reg;
+ int value;
+ } u_store_register;
+ struct {
+ int reg;
+ } u_increment_register;
+ struct {
+ int reg;
+ bool is_capture;
+ } u_position_register;
+ struct {
+ int stack_pointer_register;
+ int current_position_register;
+ int clear_register_count;
+ int clear_register_from;
+ } u_submatch;
+ struct {
+ int start_register;
+ int repetition_register;
+ int repetition_limit;
+ } u_empty_match_check;
+ struct {
+ int range_from;
+ int range_to;
+ } u_clear_captures;
+ } data_;
+ ActionNode(ActionType action_type, RegExpNode* on_success)
+ : SeqRegExpNode(on_success), action_type_(action_type) {}
+ ActionType action_type_;
+ friend class DotPrinterImpl;
+};
+
+class TextNode : public SeqRegExpNode {
+ public:
+ TextNode(ZoneList<TextElement>* elms, bool read_backward,
+ RegExpNode* on_success)
+ : SeqRegExpNode(on_success), elms_(elms), read_backward_(read_backward) {}
+ TextNode(RegExpCharacterClass* that, bool read_backward,
+ RegExpNode* on_success)
+ : SeqRegExpNode(on_success),
+ elms_(new (zone()) ZoneList<TextElement>(1, zone())),
+ read_backward_(read_backward) {
+ elms_->Add(TextElement::CharClass(that), zone());
+ }
+ // Create TextNode for a single character class for the given ranges.
+ static TextNode* CreateForCharacterRanges(Zone* zone,
+ ZoneList<CharacterRange>* ranges,
+ bool read_backward,
+ RegExpNode* on_success,
+ JSRegExp::Flags flags);
+ // Create TextNode for a surrogate pair with a range given for the
+ // lead and the trail surrogate each.
+ static TextNode* CreateForSurrogatePair(Zone* zone, CharacterRange lead,
+ CharacterRange trail,
+ bool read_backward,
+ RegExpNode* on_success,
+ JSRegExp::Flags flags);
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ ZoneList<TextElement>* elements() { return elms_; }
+ bool read_backward() { return read_backward_; }
+ void MakeCaseIndependent(Isolate* isolate, bool is_one_byte);
+ int GreedyLoopTextLength() override;
+ RegExpNode* GetSuccessorOfOmnivorousTextNode(
+ RegExpCompiler* compiler) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+ void CalculateOffsets();
+ RegExpNode* FilterOneByte(int depth) override;
+
+ private:
+ enum TextEmitPassType {
+ NON_LATIN1_MATCH, // Check for characters that can't match.
+ SIMPLE_CHARACTER_MATCH, // Case-dependent single character check.
+ NON_LETTER_CHARACTER_MATCH, // Check characters that have no case equivs.
+ CASE_CHARACTER_MATCH, // Case-independent single character check.
+ CHARACTER_CLASS_MATCH // Character class.
+ };
+ static bool SkipPass(TextEmitPassType pass, bool ignore_case);
+ static const int kFirstRealPass = SIMPLE_CHARACTER_MATCH;
+ static const int kLastPass = CHARACTER_CLASS_MATCH;
+ void TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
+ bool preloaded, Trace* trace, bool first_element_checked,
+ int* checked_up_to);
+ int Length();
+ ZoneList<TextElement>* elms_;
+ bool read_backward_;
+};
+
+class AssertionNode : public SeqRegExpNode {
+ public:
+ enum AssertionType {
+ AT_END,
+ AT_START,
+ AT_BOUNDARY,
+ AT_NON_BOUNDARY,
+ AFTER_NEWLINE
+ };
+ static AssertionNode* AtEnd(RegExpNode* on_success) {
+ return new (on_success->zone()) AssertionNode(AT_END, on_success);
+ }
+ static AssertionNode* AtStart(RegExpNode* on_success) {
+ return new (on_success->zone()) AssertionNode(AT_START, on_success);
+ }
+ static AssertionNode* AtBoundary(RegExpNode* on_success) {
+ return new (on_success->zone()) AssertionNode(AT_BOUNDARY, on_success);
+ }
+ static AssertionNode* AtNonBoundary(RegExpNode* on_success) {
+ return new (on_success->zone()) AssertionNode(AT_NON_BOUNDARY, on_success);
+ }
+ static AssertionNode* AfterNewline(RegExpNode* on_success) {
+ return new (on_success->zone()) AssertionNode(AFTER_NEWLINE, on_success);
+ }
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+ AssertionType assertion_type() { return assertion_type_; }
+
+ private:
+ void EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace);
+ enum IfPrevious { kIsNonWord, kIsWord };
+ void BacktrackIfPrevious(RegExpCompiler* compiler, Trace* trace,
+ IfPrevious backtrack_if_previous);
+ AssertionNode(AssertionType t, RegExpNode* on_success)
+ : SeqRegExpNode(on_success), assertion_type_(t) {}
+ AssertionType assertion_type_;
+};
+
+class BackReferenceNode : public SeqRegExpNode {
+ public:
+ BackReferenceNode(int start_reg, int end_reg, JSRegExp::Flags flags,
+ bool read_backward, RegExpNode* on_success)
+ : SeqRegExpNode(on_success),
+ start_reg_(start_reg),
+ end_reg_(end_reg),
+ flags_(flags),
+ read_backward_(read_backward) {}
+ void Accept(NodeVisitor* visitor) override;
+ int start_register() { return start_reg_; }
+ int end_register() { return end_reg_; }
+ bool read_backward() { return read_backward_; }
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int recursion_depth,
+ bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override {
+ return;
+ }
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+
+ private:
+ int start_reg_;
+ int end_reg_;
+ JSRegExp::Flags flags_;
+ bool read_backward_;
+};
+
+class EndNode : public RegExpNode {
+ public:
+ enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
+ EndNode(Action action, Zone* zone) : RegExpNode(zone), action_(action) {}
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int recursion_depth,
+ bool not_at_start) override {
+ return 0;
+ }
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override {
+ // Returning 0 from EatsAtLeast should ensure we never get here.
+ UNREACHABLE();
+ }
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override {
+ // Returning 0 from EatsAtLeast should ensure we never get here.
+ UNREACHABLE();
+ }
+
+ private:
+ Action action_;
+};
+
+class NegativeSubmatchSuccess : public EndNode {
+ public:
+ NegativeSubmatchSuccess(int stack_pointer_reg, int position_reg,
+ int clear_capture_count, int clear_capture_start,
+ Zone* zone)
+ : EndNode(NEGATIVE_SUBMATCH_SUCCESS, zone),
+ stack_pointer_register_(stack_pointer_reg),
+ current_position_register_(position_reg),
+ clear_capture_count_(clear_capture_count),
+ clear_capture_start_(clear_capture_start) {}
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+
+ private:
+ int stack_pointer_register_;
+ int current_position_register_;
+ int clear_capture_count_;
+ int clear_capture_start_;
+};
+
+class Guard : public ZoneObject {
+ public:
+ enum Relation { LT, GEQ };
+ Guard(int reg, Relation op, int value) : reg_(reg), op_(op), value_(value) {}
+ int reg() { return reg_; }
+ Relation op() { return op_; }
+ int value() { return value_; }
+
+ private:
+ int reg_;
+ Relation op_;
+ int value_;
+};
+
+class GuardedAlternative {
+ public:
+ explicit GuardedAlternative(RegExpNode* node)
+ : node_(node), guards_(nullptr) {}
+ void AddGuard(Guard* guard, Zone* zone);
+ RegExpNode* node() { return node_; }
+ void set_node(RegExpNode* node) { node_ = node; }
+ ZoneList<Guard*>* guards() { return guards_; }
+
+ private:
+ RegExpNode* node_;
+ ZoneList<Guard*>* guards_;
+};
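
A hedged illustration of how guards attach to alternatives; the register number and limit are made up, bounded quantifiers being the typical client.

// Illustrative only: the alternative 'body' may only be tried while register 2
// is still below 3, e.g. for a quantifier with at most three repetitions.
Guard* guard = new (zone) Guard(2, Guard::LT, 3);
GuardedAlternative alternative(body);
alternative.AddGuard(guard, zone);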
+
+class AlternativeGeneration;
+
+class ChoiceNode : public RegExpNode {
+ public:
+ explicit ChoiceNode(int expected_size, Zone* zone)
+ : RegExpNode(zone),
+ alternatives_(new (zone)
+ ZoneList<GuardedAlternative>(expected_size, zone)),
+ not_at_start_(false),
+ being_calculated_(false) {}
+ void Accept(NodeVisitor* visitor) override;
+ void AddAlternative(GuardedAlternative node) {
+ alternatives()->Add(node, zone());
+ }
+ ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ int EatsAtLeastHelper(int still_to_find, int budget,
+ RegExpNode* ignore_this_node, bool not_at_start);
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+
+ bool being_calculated() { return being_calculated_; }
+ bool not_at_start() { return not_at_start_; }
+ void set_not_at_start() { not_at_start_ = true; }
+ void set_being_calculated(bool b) { being_calculated_ = b; }
+ virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
+ return true;
+ }
+ RegExpNode* FilterOneByte(int depth) override;
+ virtual bool read_backward() { return false; }
+
+ protected:
+ int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
+ ZoneList<GuardedAlternative>* alternatives_;
+
+ private:
+ friend class Analysis;
+
+ void GenerateGuard(RegExpMacroAssembler* macro_assembler, Guard* guard,
+ Trace* trace);
+ int CalculatePreloadCharacters(RegExpCompiler* compiler, int eats_at_least);
+ void EmitOutOfLineContinuation(RegExpCompiler* compiler, Trace* trace,
+ GuardedAlternative alternative,
+ AlternativeGeneration* alt_gen,
+ int preload_characters,
+ bool next_expects_preload);
+ void SetUpPreLoad(RegExpCompiler* compiler, Trace* current_trace,
+ PreloadState* preloads);
+ void AssertGuardsMentionRegisters(Trace* trace);
+ int EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler, Trace* trace);
+ Trace* EmitGreedyLoop(RegExpCompiler* compiler, Trace* trace,
+ AlternativeGenerationList* alt_gens,
+ PreloadState* preloads,
+ GreedyLoopState* greedy_loop_state, int text_length);
+ void EmitChoices(RegExpCompiler* compiler,
+ AlternativeGenerationList* alt_gens, int first_choice,
+ Trace* trace, PreloadState* preloads);
+
+ // If true, this node is never checked at the start of the input.
+ // Allows a new trace to start with at_start() set to false.
+ bool not_at_start_;
+ bool being_calculated_;
+};
+
+class NegativeLookaroundChoiceNode : public ChoiceNode {
+ public:
+ explicit NegativeLookaroundChoiceNode(GuardedAlternative this_must_fail,
+ GuardedAlternative then_do_this,
+ Zone* zone)
+ : ChoiceNode(2, zone) {
+ AddAlternative(this_must_fail);
+ AddAlternative(then_do_this);
+ }
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override {
+ alternatives_->at(1).node()->FillInBMInfo(isolate, offset, budget - 1, bm,
+ not_at_start);
+ if (offset == 0) set_bm_info(not_at_start, bm);
+ }
+ // For a negative lookahead we don't emit the quick check for the
+ // alternative that is expected to fail. This is because quick check code
+  // starts by loading enough characters for the alternative that takes the
+  // fewest characters, but on a negative lookahead the negative branch did
+  // not take part in that calculation (EatsAtLeast), so the assumptions don't
+  // hold.
+ bool try_to_emit_quick_check_for_alternative(bool is_first) override {
+ return !is_first;
+ }
+ RegExpNode* FilterOneByte(int depth) override;
+};
+
+class LoopChoiceNode : public ChoiceNode {
+ public:
+ LoopChoiceNode(bool body_can_be_zero_length, bool read_backward, Zone* zone)
+ : ChoiceNode(2, zone),
+ loop_node_(nullptr),
+ continue_node_(nullptr),
+ body_can_be_zero_length_(body_can_be_zero_length),
+ read_backward_(read_backward) {}
+ void AddLoopAlternative(GuardedAlternative alt);
+ void AddContinueAlternative(GuardedAlternative alt);
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+ RegExpNode* loop_node() { return loop_node_; }
+ RegExpNode* continue_node() { return continue_node_; }
+ bool body_can_be_zero_length() { return body_can_be_zero_length_; }
+ bool read_backward() override { return read_backward_; }
+ void Accept(NodeVisitor* visitor) override;
+ RegExpNode* FilterOneByte(int depth) override;
+
+ private:
+ // AddAlternative is made private for loop nodes because alternatives
+ // should not be added freely; we need to keep track of which node
+ // loops back to the node itself.
+ void AddAlternative(GuardedAlternative node) {
+ ChoiceNode::AddAlternative(node);
+ }
+
+ RegExpNode* loop_node_;
+ RegExpNode* continue_node_;
+ bool body_can_be_zero_length_;
+ bool read_backward_;
+};
+
+class NodeVisitor {
+ public:
+ virtual ~NodeVisitor() = default;
+#define DECLARE_VISIT(Type) virtual void Visit##Type(Type##Node* that) = 0;
+ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#undef DECLARE_VISIT
+ virtual void VisitLoopChoice(LoopChoiceNode* that) { VisitChoice(that); }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_NODES_H_
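The NodeVisitor interface declared at the end of regexp-nodes.h above is generated with an X-macro: FOR_EACH_NODE_TYPE expands into one pure-virtual Visit##Type method per node class, and LoopChoiceNode is routed to VisitChoice unless a visitor overrides VisitLoopChoice. A minimal, self-contained sketch of the same macro technique, using invented toy names rather than V8's actual node list, could look like this:

#include <iostream>

// Toy version of the DECLARE_VISIT / FOR_EACH_NODE_TYPE pattern. The node
// list and class names are invented for illustration only.
class ToyTextNode;
class ToyChoiceNode;

#define FOR_EACH_TOY_NODE_TYPE(V) \
  V(Text)                         \
  V(Choice)

class ToyNodeVisitor {
 public:
  virtual ~ToyNodeVisitor() = default;
  // Expands into: virtual void VisitText(ToyTextNode* that) = 0; etc.
#define DECLARE_VISIT(Type) virtual void Visit##Type(Toy##Type##Node* that) = 0;
  FOR_EACH_TOY_NODE_TYPE(DECLARE_VISIT)
#undef DECLARE_VISIT
};

class ToyTextNode {};
class ToyChoiceNode {};

class PrintingVisitor final : public ToyNodeVisitor {
 public:
  void VisitText(ToyTextNode*) override { std::cout << "text node\n"; }
  void VisitChoice(ToyChoiceNode*) override { std::cout << "choice node\n"; }
};

int main() {
  ToyChoiceNode node;
  PrintingVisitor visitor;
  visitor.VisitChoice(&node);
  return 0;
}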
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 7cae456f56..3647680969 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -9,8 +9,9 @@
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/jsregexp.h"
#include "src/regexp/property-sequences.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp.h"
#include "src/strings/char-predicates-inl.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
@@ -879,24 +880,25 @@ bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<uc16>* name,
DCHECK(0 < index && index <= captures_started_);
DCHECK_NOT_NULL(name);
+ RegExpCapture* capture = GetCapture(index);
+ DCHECK_NULL(capture->name());
+
+ capture->set_name(name);
+
if (named_captures_ == nullptr) {
- named_captures_ = new (zone()) ZoneList<RegExpCapture*>(1, zone());
+ named_captures_ = new (zone_->New(sizeof(*named_captures_)))
+ ZoneSet<RegExpCapture*, RegExpCaptureNameLess>(zone());
} else {
// Check for duplicates and bail if we find any.
- // TODO(jgruber): O(n^2).
- for (const auto& named_capture : *named_captures_) {
- if (*named_capture->name() == *name) {
- ReportError(CStrVector("Duplicate capture group name"));
- return false;
- }
+
+ const auto& named_capture_it = named_captures_->find(capture);
+ if (named_capture_it != named_captures_->end()) {
+ ReportError(CStrVector("Duplicate capture group name"));
+ return false;
}
}
- RegExpCapture* capture = GetCapture(index);
- DCHECK_NULL(capture->name());
-
- capture->set_name(name);
- named_captures_->Add(capture, zone());
+ named_captures_->emplace(capture);
return true;
}
@@ -943,20 +945,22 @@ void RegExpParser::PatchNamedBackReferences() {
}
// Look up and patch the actual capture for each named back reference.
- // TODO(jgruber): O(n^2), optimize if necessary.
for (int i = 0; i < named_back_references_->length(); i++) {
RegExpBackReference* ref = named_back_references_->at(i);
- int index = -1;
- for (const auto& capture : *named_captures_) {
- if (*capture->name() == *ref->name()) {
- index = capture->index();
- break;
- }
- }
+ // A throwaway capture used to search named_captures_ by name; the
+ // capture's index is never used.
+ static const int kInvalidIndex = 0;
+ RegExpCapture* search_capture = new (zone()) RegExpCapture(kInvalidIndex);
+ DCHECK_NULL(search_capture->name());
+ search_capture->set_name(ref->name());
- if (index == -1) {
+ int index = -1;
+ const auto& capture_it = named_captures_->find(search_capture);
+ if (capture_it != named_captures_->end()) {
+ index = (*capture_it)->index();
+ } else {
ReportError(CStrVector("Invalid named capture referenced"));
return;
}
@@ -981,16 +985,17 @@ RegExpCapture* RegExpParser::GetCapture(int index) {
}
Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
- if (named_captures_ == nullptr || named_captures_->is_empty())
+ if (named_captures_ == nullptr || named_captures_->empty()) {
return Handle<FixedArray>();
+ }
Factory* factory = isolate()->factory();
- int len = named_captures_->length() * 2;
+ int len = static_cast<int>(named_captures_->size()) * 2;
Handle<FixedArray> array = factory->NewFixedArray(len);
- for (int i = 0; i < named_captures_->length(); i++) {
- RegExpCapture* capture = named_captures_->at(i);
+ int i = 0;
+ for (const auto& capture : *named_captures_) {
Vector<const uc16> capture_name(capture->name()->data(),
capture->name()->size());
// CSA code in ConstructNewResultFromMatchInfo requires these strings to be
@@ -998,7 +1003,10 @@ Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
Handle<String> name = factory->InternalizeString(capture_name);
array->set(i * 2, *name);
array->set(i * 2 + 1, Smi::FromInt(capture->index()));
+
+ i++;
}
+ DCHECK_EQ(i * 2, len);
return array;
}
@@ -1963,12 +1971,6 @@ void RegExpBuilder::AddTerm(RegExpTree* term) {
void RegExpBuilder::AddAssertion(RegExpTree* assert) {
FlushText();
- if (terms_.length() > 0 && terms_.last()->IsAssertion()) {
- // Omit repeated assertions of the same type.
- RegExpAssertion* last = terms_.last()->AsAssertion();
- RegExpAssertion* next = assert->AsAssertion();
- if (last->assertion_type() == next->assertion_type()) return;
- }
terms_.Add(assert, zone());
LAST(ADD_ASSERT);
}
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index bf9e62ed71..36cec7e984 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -326,11 +326,19 @@ class V8_EXPORT_PRIVATE RegExpParser {
FlatStringReader* in() { return in_; }
void ScanForCaptures();
+ struct RegExpCaptureNameLess {
+ bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
+ DCHECK_NOT_NULL(lhs);
+ DCHECK_NOT_NULL(rhs);
+ return *lhs->name() < *rhs->name();
+ }
+ };
+
Isolate* isolate_;
Zone* zone_;
Handle<String>* error_;
ZoneList<RegExpCapture*>* captures_;
- ZoneList<RegExpCapture*>* named_captures_;
+ ZoneSet<RegExpCapture*, RegExpCaptureNameLess>* named_captures_;
ZoneList<RegExpBackReference*>* named_back_references_;
FlatStringReader* in_;
uc32 current_;
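The parser changes above replace the ZoneList of named captures with a ZoneSet ordered by capture name: duplicate names are rejected by a set lookup instead of a linear scan, and named back references are resolved by probing the set with a throwaway capture that carries only the name (its index is a dummy). A self-contained sketch of that pattern, using std::set and invented types rather than V8's Zone containers, might look as follows:

#include <cassert>
#include <set>
#include <string>

// Sketch only; Capture and CaptureNameLess are invented stand-ins.
struct Capture {
  int index;
  std::string name;
};

struct CaptureNameLess {
  bool operator()(const Capture* lhs, const Capture* rhs) const {
    return lhs->name < rhs->name;
  }
};

using CaptureSet = std::set<Capture*, CaptureNameLess>;

// Returns false on a duplicate name, mirroring the duplicate check in
// CreateNamedCaptureAtIndex (simplified to a single insert).
bool AddNamedCapture(CaptureSet* captures, Capture* capture) {
  return captures->insert(capture).second;
}

// Mirrors the back-reference patching: a temporary capture that only carries
// the name is used as the search key; its index never matters.
int LookupIndexByName(const CaptureSet& captures, const std::string& name) {
  Capture key{0, name};  // Index 0 is a dummy; only the name is compared.
  auto it = captures.find(&key);
  return it == captures.end() ? -1 : (*it)->index;
}

int main() {
  Capture a{1, "year"}, b{2, "month"}, dup{3, "year"};
  CaptureSet captures;
  assert(AddNamedCapture(&captures, &a));
  assert(AddNamedCapture(&captures, &b));
  assert(!AddNamedCapture(&captures, &dup));  // Duplicate name rejected.
  assert(LookupIndexByName(captures, "month") == 2);
  assert(LookupIndexByName(captures, "day") == -1);
  return 0;
}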
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 49f9d4476b..ad50270fdc 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -8,7 +8,7 @@
#include "src/heap/factory.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
@@ -179,7 +179,9 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
return false;
}
- if (!isolate->IsRegExpSpeciesLookupChainIntact()) return false;
+ if (!isolate->IsRegExpSpeciesLookupChainIntact(isolate->native_context())) {
+ return false;
+ }
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
new file mode 100644
index 0000000000..15b0321c46
--- /dev/null
+++ b/deps/v8/src/regexp/regexp.cc
@@ -0,0 +1,1018 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp.h"
+
+#include "src/codegen/compilation-cache.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/regexp/regexp-bytecode-generator.h"
+#include "src/regexp/regexp-compiler.h"
+#include "src/regexp/regexp-dotprinter.h"
+#include "src/regexp/regexp-interpreter.h"
+#include "src/regexp/regexp-macro-assembler-arch.h"
+#include "src/regexp/regexp-parser.h"
+#include "src/strings/string-search.h"
+
+namespace v8 {
+namespace internal {
+
+using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
+
+class RegExpImpl final : public AllStatic {
+ public:
+ // Returns a string representation of a regular expression.
+ // Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
+ // This function calls the garbage collector if necessary.
+ static Handle<String> ToString(Handle<Object> value);
+
+ // Prepares a JSRegExp object with Irregexp-specific data.
+ static void IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern, JSRegExp::Flags flags,
+ int capture_register_count);
+
+ static void AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> match_pattern);
+
+ static int AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index, int32_t* output,
+ int output_size);
+
+ static Handle<Object> AtomExec(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index,
+ Handle<RegExpMatchInfo> last_match_info);
+
+ // Execute a regular expression on the subject, starting from index.
+ // If matching succeeds, return the number of matches. This can be larger
+ // than one in the case of global regular expressions.
+ // The captures and subcaptures are stored into the registers vector.
+ // If matching fails, returns RE_FAILURE.
+ // If execution fails, sets a pending exception and returns RE_EXCEPTION.
+ static int IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index, int32_t* output,
+ int output_size);
+
+ // Execute an Irregexp bytecode pattern.
+ // On a successful match, the result is a JSArray containing
+ // captured positions. On a failure, the result is the null value.
+ // Returns an empty handle in case of an exception.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> IrregexpExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info);
+
+ static bool CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> sample_subject, bool is_one_byte);
+ static inline bool EnsureCompiledIrregexp(Isolate* isolate,
+ Handle<JSRegExp> re,
+ Handle<String> sample_subject,
+ bool is_one_byte);
+
+ // Returns true on success, false on failure.
+ static bool Compile(Isolate* isolate, Zone* zone, RegExpCompileData* input,
+ JSRegExp::Flags flags, Handle<String> pattern,
+ Handle<String> sample_subject, bool is_one_byte);
+
+ // For acting on the JSRegExp data FixedArray.
+ static int IrregexpMaxRegisterCount(FixedArray re);
+ static void SetIrregexpMaxRegisterCount(FixedArray re, int value);
+ static void SetIrregexpCaptureNameMap(FixedArray re,
+ Handle<FixedArray> value);
+ static int IrregexpNumberOfCaptures(FixedArray re);
+ static int IrregexpNumberOfRegisters(FixedArray re);
+ static ByteArray IrregexpByteCode(FixedArray re, bool is_one_byte);
+ static Code IrregexpNativeCode(FixedArray re, bool is_one_byte);
+};
+
+V8_WARN_UNUSED_RESULT
+static inline MaybeHandle<Object> ThrowRegExpException(
+ Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
+ Handle<String> error_text) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewSyntaxError(MessageTemplate::kMalformedRegExp, pattern, error_text),
+ Object);
+}
+
+inline void ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> error_text) {
+ USE(ThrowRegExpException(isolate, re, Handle<String>(re->Pattern(), isolate),
+ error_text));
+}
+
+// Identifies the sort of regexps where the regexp engine is faster
+// than the code used for atom matches.
+static bool HasFewDifferentCharacters(Handle<String> pattern) {
+ int length = Min(kMaxLookaheadForBoyerMoore, pattern->length());
+ if (length <= kPatternTooShortForBoyerMoore) return false;
+ const int kMod = 128;
+ bool character_found[kMod];
+ int different = 0;
+ memset(&character_found[0], 0, sizeof(character_found));
+ for (int i = 0; i < length; i++) {
+ int ch = (pattern->Get(i) & (kMod - 1));
+ if (!character_found[ch]) {
+ character_found[ch] = true;
+ different++;
+ // We declare a regexp low-alphabet if it has at least 3 times as many
+ // characters as it has different characters.
+ if (different * 3 > length) return false;
+ }
+ }
+ return true;
+}
+
+// Generic RegExp methods. Dispatches to implementation specific methods.
+
+// static
+MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern,
+ JSRegExp::Flags flags) {
+ DCHECK(pattern->IsFlat());
+
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ CompilationCache* compilation_cache = isolate->compilation_cache();
+ MaybeHandle<FixedArray> maybe_cached =
+ compilation_cache->LookupRegExp(pattern, flags);
+ Handle<FixedArray> cached;
+ if (maybe_cached.ToHandle(&cached)) {
+ re->set_data(*cached);
+ return re;
+ }
+
+ PostponeInterruptsScope postpone(isolate);
+ RegExpCompileData parse_result;
+ FlatStringReader reader(isolate, pattern);
+ DCHECK(!isolate->has_pending_exception());
+ if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
+ &parse_result)) {
+ // Throw an exception if we fail to parse the pattern.
+ return ThrowRegExpException(isolate, re, pattern, parse_result.error);
+ }
+
+ bool has_been_compiled = false;
+
+ if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
+ !HasFewDifferentCharacters(pattern)) {
+ // Parse-tree is a single atom that is equal to the pattern.
+ RegExpImpl::AtomCompile(isolate, re, pattern, flags, pattern);
+ has_been_compiled = true;
+ } else if (parse_result.tree->IsAtom() && !IsSticky(flags) &&
+ parse_result.capture_count == 0) {
+ RegExpAtom* atom = parse_result.tree->AsAtom();
+ Vector<const uc16> atom_pattern = atom->data();
+ Handle<String> atom_string;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, atom_string,
+ isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
+ if (!IgnoreCase(atom->flags()) && !HasFewDifferentCharacters(atom_string)) {
+ RegExpImpl::AtomCompile(isolate, re, pattern, flags, atom_string);
+ has_been_compiled = true;
+ }
+ }
+ if (!has_been_compiled) {
+ RegExpImpl::IrregexpInitialize(isolate, re, pattern, flags,
+ parse_result.capture_count);
+ }
+ DCHECK(re->data().IsFixedArray());
+ // Compilation succeeded so the data is set on the regexp
+ // and we can store it in the cache.
+ Handle<FixedArray> data(FixedArray::cast(re->data()), isolate);
+ compilation_cache->PutRegExp(pattern, flags, data);
+
+ return re;
+}
+
+// static
+MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index,
+ Handle<RegExpMatchInfo> last_match_info) {
+ switch (regexp->TypeTag()) {
+ case JSRegExp::ATOM:
+ return RegExpImpl::AtomExec(isolate, regexp, subject, index,
+ last_match_info);
+ case JSRegExp::IRREGEXP: {
+ return RegExpImpl::IrregexpExec(isolate, regexp, subject, index,
+ last_match_info);
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+// RegExp Atom implementation: Simple string search using indexOf.
+
+void RegExpImpl::AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> match_pattern) {
+ isolate->factory()->SetRegExpAtomData(re, JSRegExp::ATOM, pattern, flags,
+ match_pattern);
+}
+
+static void SetAtomLastCapture(Isolate* isolate,
+ Handle<RegExpMatchInfo> last_match_info,
+ String subject, int from, int to) {
+ SealHandleScope shs(isolate);
+ last_match_info->SetNumberOfCaptureRegisters(2);
+ last_match_info->SetLastSubject(subject);
+ last_match_info->SetLastInput(subject);
+ last_match_info->SetCapture(0, from);
+ last_match_info->SetCapture(1, to);
+}
+
+int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index, int32_t* output,
+ int output_size) {
+ DCHECK_LE(0, index);
+ DCHECK_LE(index, subject->length());
+
+ subject = String::Flatten(isolate, subject);
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
+
+ String needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
+ int needle_len = needle.length();
+ DCHECK(needle.IsFlat());
+ DCHECK_LT(0, needle_len);
+
+ if (index + needle_len > subject->length()) {
+ return RegExp::RE_FAILURE;
+ }
+
+ for (int i = 0; i < output_size; i += 2) {
+ String::FlatContent needle_content = needle.GetFlatContent(no_gc);
+ String::FlatContent subject_content = subject->GetFlatContent(no_gc);
+ DCHECK(needle_content.IsFlat());
+ DCHECK(subject_content.IsFlat());
+ // dispatch on type of strings
+ index =
+ (needle_content.IsOneByte()
+ ? (subject_content.IsOneByte()
+ ? SearchString(isolate, subject_content.ToOneByteVector(),
+ needle_content.ToOneByteVector(), index)
+ : SearchString(isolate, subject_content.ToUC16Vector(),
+ needle_content.ToOneByteVector(), index))
+ : (subject_content.IsOneByte()
+ ? SearchString(isolate, subject_content.ToOneByteVector(),
+ needle_content.ToUC16Vector(), index)
+ : SearchString(isolate, subject_content.ToUC16Vector(),
+ needle_content.ToUC16Vector(), index)));
+ if (index == -1) {
+ return i / 2; // Return number of matches.
+ } else {
+ output[i] = index;
+ output[i + 1] = index + needle_len;
+ index += needle_len;
+ }
+ }
+ return output_size / 2;
+}
+
+Handle<Object> RegExpImpl::AtomExec(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> subject, int index,
+ Handle<RegExpMatchInfo> last_match_info) {
+ static const int kNumRegisters = 2;
+ STATIC_ASSERT(kNumRegisters <= Isolate::kJSRegexpStaticOffsetsVectorSize);
+ int32_t* output_registers = isolate->jsregexp_static_offsets_vector();
+
+ int res =
+ AtomExecRaw(isolate, re, subject, index, output_registers, kNumRegisters);
+
+ if (res == RegExp::RE_FAILURE) return isolate->factory()->null_value();
+
+ DCHECK_EQ(res, RegExp::RE_SUCCESS);
+ SealHandleScope shs(isolate);
+ SetAtomLastCapture(isolate, last_match_info, *subject, output_registers[0],
+ output_registers[1]);
+ return last_match_info;
+}
+
+// Irregexp implementation.
+
+// Ensures that the regexp object contains a compiled version of the
+// source for either one-byte or two-byte subject strings.
+// If the compiled version doesn't already exist, it is compiled
+// from the source pattern.
+// If compilation fails, an exception is thrown and this function
+// returns false.
+bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> sample_subject,
+ bool is_one_byte) {
+ Object compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
+ if (compiled_code != Smi::FromInt(JSRegExp::kUninitializedValue)) {
+ DCHECK(FLAG_regexp_interpret_all ? compiled_code.IsByteArray()
+ : compiled_code.IsCode());
+ return true;
+ }
+ return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
+}
+
+bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> sample_subject,
+ bool is_one_byte) {
+ // Compile the RegExp.
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ PostponeInterruptsScope postpone(isolate);
+#ifdef DEBUG
+ Object entry = re->DataAt(JSRegExp::code_index(is_one_byte));
+ // When arriving here entry can only be a smi representing an uncompiled
+ // regexp.
+ DCHECK(entry.IsSmi());
+ int entry_value = Smi::ToInt(entry);
+ DCHECK_EQ(JSRegExp::kUninitializedValue, entry_value);
+#endif
+
+ JSRegExp::Flags flags = re->GetFlags();
+
+ Handle<String> pattern(re->Pattern(), isolate);
+ pattern = String::Flatten(isolate, pattern);
+ RegExpCompileData compile_data;
+ FlatStringReader reader(isolate, pattern);
+ if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
+ &compile_data)) {
+ // Throw an exception if we fail to parse the pattern.
+ // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
+ USE(ThrowRegExpException(isolate, re, pattern, compile_data.error));
+ return false;
+ }
+ const bool compilation_succeeded =
+ Compile(isolate, &zone, &compile_data, flags, pattern, sample_subject,
+ is_one_byte);
+ if (!compilation_succeeded) {
+ DCHECK(!compile_data.error.is_null());
+ ThrowRegExpException(isolate, re, compile_data.error);
+ return false;
+ }
+
+ Handle<FixedArray> data =
+ Handle<FixedArray>(FixedArray::cast(re->data()), isolate);
+ data->set(JSRegExp::code_index(is_one_byte), compile_data.code);
+ SetIrregexpCaptureNameMap(*data, compile_data.capture_name_map);
+ int register_max = IrregexpMaxRegisterCount(*data);
+ if (compile_data.register_count > register_max) {
+ SetIrregexpMaxRegisterCount(*data, compile_data.register_count);
+ }
+
+ return true;
+}
+
+int RegExpImpl::IrregexpMaxRegisterCount(FixedArray re) {
+ return Smi::cast(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex)).value();
+}
+
+void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray re, int value) {
+ re.set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
+}
+
+void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray re,
+ Handle<FixedArray> value) {
+ if (value.is_null()) {
+ re.set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::kZero);
+ } else {
+ re.set(JSRegExp::kIrregexpCaptureNameMapIndex, *value);
+ }
+}
+
+int RegExpImpl::IrregexpNumberOfCaptures(FixedArray re) {
+ return Smi::ToInt(re.get(JSRegExp::kIrregexpCaptureCountIndex));
+}
+
+int RegExpImpl::IrregexpNumberOfRegisters(FixedArray re) {
+ return Smi::ToInt(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex));
+}
+
+ByteArray RegExpImpl::IrregexpByteCode(FixedArray re, bool is_one_byte) {
+ return ByteArray::cast(re.get(JSRegExp::code_index(is_one_byte)));
+}
+
+Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
+ return Code::cast(re.get(JSRegExp::code_index(is_one_byte)));
+}
+
+void RegExpImpl::IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern,
+ JSRegExp::Flags flags, int capture_count) {
+ // Initialize compiled code entries to null.
+ isolate->factory()->SetRegExpIrregexpData(re, JSRegExp::IRREGEXP, pattern,
+ flags, capture_count);
+}
+
+// static
+int RegExp::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject) {
+ DCHECK(subject->IsFlat());
+
+ // Check representation of the underlying storage.
+ bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
+ if (!RegExpImpl::EnsureCompiledIrregexp(isolate, regexp, subject,
+ is_one_byte)) {
+ return -1;
+ }
+
+ DisallowHeapAllocation no_gc;
+ FixedArray data = FixedArray::cast(regexp->data());
+ if (FLAG_regexp_interpret_all) {
+ // Byte-code regexp needs space allocated for all its registers.
+ // The result captures are copied to the start of the registers array
+ // if the match succeeds. This way those registers are not clobbered
+ // when we set the last match info from the last successful match.
+ return RegExpImpl::IrregexpNumberOfRegisters(data) +
+ (RegExpImpl::IrregexpNumberOfCaptures(data) + 1) * 2;
+ } else {
+ // Native regexp only needs room to output captures. Registers are handled
+ // internally.
+ return (RegExpImpl::IrregexpNumberOfCaptures(data) + 1) * 2;
+ }
+}
+
+int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index,
+ int32_t* output, int output_size) {
+ Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
+
+ DCHECK_LE(0, index);
+ DCHECK_LE(index, subject->length());
+ DCHECK(subject->IsFlat());
+
+ bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
+
+ if (!FLAG_regexp_interpret_all) {
+ DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
+ do {
+ EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
+ Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate);
+ // The stack is used to allocate registers for the compiled regexp code.
+ // This means that in case of failure, the output registers array is left
+ // untouched and contains the capture results from the previous successful
+ // match. We can use that to set the last match info lazily.
+ int res = NativeRegExpMacroAssembler::Match(code, subject, output,
+ output_size, index, isolate);
+ if (res != NativeRegExpMacroAssembler::RETRY) {
+ DCHECK(res != NativeRegExpMacroAssembler::EXCEPTION ||
+ isolate->has_pending_exception());
+ STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) ==
+ RegExp::RE_SUCCESS);
+ STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::FAILURE) ==
+ RegExp::RE_FAILURE);
+ STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION) ==
+ RegExp::RE_EXCEPTION);
+ return res;
+ }
+ // If result is RETRY, the string has changed representation, and we
+ // must restart from scratch.
+ // In this case, we must make sure we are prepared to handle a
+ // potentially different subject (the string can switch between being
+ // internal and external, and even between being Latin1 and UC16, but
+ // the characters are always the same).
+ RegExp::IrregexpPrepare(isolate, regexp, subject);
+ is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
+ } while (true);
+ UNREACHABLE();
+ } else {
+ DCHECK(FLAG_regexp_interpret_all);
+ DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp));
+ // We must have done EnsureCompiledIrregexp, so we can get the number of
+ // registers.
+ int number_of_capture_registers =
+ (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
+ int32_t* raw_output = &output[number_of_capture_registers];
+
+ do {
+ // We do not touch the actual capture result registers until we know there
+ // has been a match so that we can use those capture results to set the
+ // last match info.
+ for (int i = number_of_capture_registers - 1; i >= 0; i--) {
+ raw_output[i] = -1;
+ }
+ Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_one_byte),
+ isolate);
+
+ IrregexpInterpreter::Result result = IrregexpInterpreter::Match(
+ isolate, byte_codes, subject, raw_output, index);
+ DCHECK_IMPLIES(result == IrregexpInterpreter::EXCEPTION,
+ isolate->has_pending_exception());
+
+ switch (result) {
+ case IrregexpInterpreter::SUCCESS:
+ // Copy capture results to the start of the registers array.
+ MemCopy(output, raw_output,
+ number_of_capture_registers * sizeof(int32_t));
+ return result;
+ case IrregexpInterpreter::EXCEPTION:
+ case IrregexpInterpreter::FAILURE:
+ return result;
+ case IrregexpInterpreter::RETRY:
+ // The string has changed representation, and we must restart the
+ // match.
+ is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
+ EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
+ break;
+ }
+ } while (true);
+ UNREACHABLE();
+ }
+}
+
+MaybeHandle<Object> RegExpImpl::IrregexpExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int previous_index, Handle<RegExpMatchInfo> last_match_info) {
+ DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
+
+ subject = String::Flatten(isolate, subject);
+
+ // Prepare space for the return values.
+#ifdef DEBUG
+ if (FLAG_regexp_interpret_all && FLAG_trace_regexp_bytecodes) {
+ String pattern = regexp->Pattern();
+ PrintF("\n\nRegexp match: /%s/\n\n", pattern.ToCString().get());
+ PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
+ }
+#endif
+ int required_registers = RegExp::IrregexpPrepare(isolate, regexp, subject);
+ if (required_registers < 0) {
+ // Compiling failed with an exception.
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Object>();
+ }
+
+ int32_t* output_registers = nullptr;
+ if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ output_registers = NewArray<int32_t>(required_registers);
+ }
+ std::unique_ptr<int32_t[]> auto_release(output_registers);
+ if (output_registers == nullptr) {
+ output_registers = isolate->jsregexp_static_offsets_vector();
+ }
+
+ int res =
+ RegExpImpl::IrregexpExecRaw(isolate, regexp, subject, previous_index,
+ output_registers, required_registers);
+ if (res == RegExp::RE_SUCCESS) {
+ int capture_count =
+ IrregexpNumberOfCaptures(FixedArray::cast(regexp->data()));
+ return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
+ capture_count, output_registers);
+ }
+ if (res == RegExp::RE_EXCEPTION) {
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Object>();
+ }
+ DCHECK(res == RegExp::RE_FAILURE);
+ return isolate->factory()->null_value();
+}
+
+// static
+Handle<RegExpMatchInfo> RegExp::SetLastMatchInfo(
+ Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
+ Handle<String> subject, int capture_count, int32_t* match) {
+ // This is the only place where match infos can grow. If, after executing the
+ // regexp, RegExpExecStub finds that the match info is too small, it restarts
+ // execution in RegExpImpl::Exec, which finally grows the match info right
+ // here.
+
+ int capture_register_count = (capture_count + 1) * 2;
+ Handle<RegExpMatchInfo> result = RegExpMatchInfo::ReserveCaptures(
+ isolate, last_match_info, capture_register_count);
+ result->SetNumberOfCaptureRegisters(capture_register_count);
+
+ if (*result != *last_match_info) {
+ if (*last_match_info == *isolate->regexp_last_match_info()) {
+ // This inner condition is only needed for special situations like the
+ // regexp fuzzer, where we pass our own custom RegExpMatchInfo to
+ // RegExpImpl::Exec; there we actually want to bypass the Isolate's match
+ // info and execute the regexp without side effects.
+ isolate->native_context()->set_regexp_last_match_info(*result);
+ }
+ }
+
+ DisallowHeapAllocation no_allocation;
+ if (match != nullptr) {
+ for (int i = 0; i < capture_register_count; i += 2) {
+ result->SetCapture(i, match[i]);
+ result->SetCapture(i + 1, match[i + 1]);
+ }
+ }
+ result->SetLastSubject(*subject);
+ result->SetLastInput(*subject);
+ return result;
+}
+
+// static
+void RegExp::DotPrintForTesting(const char* label, RegExpNode* node) {
+ DotPrinter::DotPrint(label, node);
+}
+
+namespace {
+
+// Returns true if we've either generated too much irregexp code within this
+// isolate, or the pattern string is too long.
+bool TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern) {
+ // Limit the space regexps take up on the heap. Ideally we would keep track
+ // of the amount of regexp code currently on the heap, but this is not
+ // tracked. As a conservative approximation we track the total regexp code
+ // compiled so far, including code that has subsequently been freed, and the
+ // total executable memory at any point.
+ static constexpr size_t kRegExpExecutableMemoryLimit = 16 * MB;
+ static constexpr size_t kRegExpCompiledLimit = 1 * MB;
+
+ Heap* heap = isolate->heap();
+ if (pattern->length() > RegExp::kRegExpTooLargeToOptimize) return true;
+ return (isolate->total_regexp_code_generated() > kRegExpCompiledLimit &&
+ heap->CommittedMemoryExecutable() > kRegExpExecutableMemoryLimit);
+}
+
+} // namespace
+
+// static
+bool RegExp::CompileForTesting(Isolate* isolate, Zone* zone,
+ RegExpCompileData* data, JSRegExp::Flags flags,
+ Handle<String> pattern,
+ Handle<String> sample_subject,
+ bool is_one_byte) {
+ return RegExpImpl::Compile(isolate, zone, data, flags, pattern,
+ sample_subject, is_one_byte);
+}
+
+bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
+ JSRegExp::Flags flags, Handle<String> pattern,
+ Handle<String> sample_subject, bool is_one_byte) {
+ if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
+ data->error =
+ isolate->factory()->NewStringFromAsciiChecked("RegExp too big");
+ return false;
+ }
+
+ bool is_sticky = IsSticky(flags);
+ bool is_global = IsGlobal(flags);
+ bool is_unicode = IsUnicode(flags);
+ RegExpCompiler compiler(isolate, zone, data->capture_count, is_one_byte);
+
+ if (compiler.optimize()) {
+ compiler.set_optimize(!TooMuchRegExpCode(isolate, pattern));
+ }
+
+ // Sample some characters from the middle of the string.
+ static const int kSampleSize = 128;
+
+ sample_subject = String::Flatten(isolate, sample_subject);
+ int chars_sampled = 0;
+ int half_way = (sample_subject->length() - kSampleSize) / 2;
+ for (int i = Max(0, half_way);
+ i < sample_subject->length() && chars_sampled < kSampleSize;
+ i++, chars_sampled++) {
+ compiler.frequency_collator()->CountCharacter(sample_subject->Get(i));
+ }
+
+ // Wrap the body of the regexp in capture #0.
+ RegExpNode* captured_body =
+ RegExpCapture::ToNode(data->tree, 0, &compiler, compiler.accept());
+ RegExpNode* node = captured_body;
+ bool is_end_anchored = data->tree->IsAnchoredAtEnd();
+ bool is_start_anchored = data->tree->IsAnchoredAtStart();
+ int max_length = data->tree->max_match();
+ if (!is_start_anchored && !is_sticky) {
+ // Add a .*? at the beginning, outside the body capture, unless
+ // this expression is anchored at the beginning or sticky.
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ RegExpNode* loop_node = RegExpQuantifier::ToNode(
+ 0, RegExpTree::kInfinity, false,
+ new (zone) RegExpCharacterClass('*', default_flags), &compiler,
+ captured_body, data->contains_anchor);
+
+ if (data->contains_anchor) {
+ // Unroll the loop once to take care of matches that might start
+ // at the start of the input.
+ ChoiceNode* first_step_node = new (zone) ChoiceNode(2, zone);
+ first_step_node->AddAlternative(GuardedAlternative(captured_body));
+ first_step_node->AddAlternative(GuardedAlternative(new (zone) TextNode(
+ new (zone) RegExpCharacterClass('*', default_flags), false,
+ loop_node)));
+ node = first_step_node;
+ } else {
+ node = loop_node;
+ }
+ }
+ if (is_one_byte) {
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
+ // Do it again to propagate the new nodes to places where they were not
+ // put because they had not been calculated yet.
+ if (node != nullptr) {
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
+ }
+ } else if (is_unicode && (is_global || is_sticky)) {
+ node = RegExpCompiler::OptionallyStepBackToLeadSurrogate(&compiler, node,
+ flags);
+ }
+
+ if (node == nullptr) node = new (zone) EndNode(EndNode::BACKTRACK, zone);
+ data->node = node;
+ Analysis analysis(isolate, is_one_byte);
+ analysis.EnsureAnalyzed(node);
+ if (analysis.has_failed()) {
+ data->error =
+ isolate->factory()->NewStringFromAsciiChecked(analysis.error_message());
+ return false;
+ }
+
+ // Create the correct assembler for the architecture.
+ std::unique_ptr<RegExpMacroAssembler> macro_assembler;
+ if (!FLAG_regexp_interpret_all) {
+ // Native regexp implementation.
+ DCHECK(!FLAG_jitless);
+
+ NativeRegExpMacroAssembler::Mode mode =
+ is_one_byte ? NativeRegExpMacroAssembler::LATIN1
+ : NativeRegExpMacroAssembler::UC16;
+
+#if V8_TARGET_ARCH_IA32
+ macro_assembler.reset(new RegExpMacroAssemblerIA32(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_X64
+ macro_assembler.reset(new RegExpMacroAssemblerX64(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_ARM
+ macro_assembler.reset(new RegExpMacroAssemblerARM(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_ARM64
+ macro_assembler.reset(new RegExpMacroAssemblerARM64(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_S390
+ macro_assembler.reset(new RegExpMacroAssemblerS390(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_PPC
+ macro_assembler.reset(new RegExpMacroAssemblerPPC(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_MIPS
+ macro_assembler.reset(new RegExpMacroAssemblerMIPS(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_MIPS64
+ macro_assembler.reset(new RegExpMacroAssemblerMIPS(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#else
+#error "Unsupported architecture"
+#endif
+ } else {
+ DCHECK(FLAG_regexp_interpret_all);
+
+ // Interpreted regexp implementation.
+ macro_assembler.reset(new RegExpBytecodeGenerator(isolate, zone));
+ }
+
+ macro_assembler->set_slow_safe(TooMuchRegExpCode(isolate, pattern));
+
+ // Inserted here, instead of in Assembler, because it depends on information
+ // in the AST that isn't replicated in the Node structure.
+ static const int kMaxBacksearchLimit = 1024;
+ if (is_end_anchored && !is_start_anchored && !is_sticky &&
+ max_length < kMaxBacksearchLimit) {
+ macro_assembler->SetCurrentPositionFromEnd(max_length);
+ }
+
+ if (is_global) {
+ RegExpMacroAssembler::GlobalMode mode = RegExpMacroAssembler::GLOBAL;
+ if (data->tree->min_match() > 0) {
+ mode = RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK;
+ } else if (is_unicode) {
+ mode = RegExpMacroAssembler::GLOBAL_UNICODE;
+ }
+ macro_assembler->set_global_mode(mode);
+ }
+
+ RegExpCompiler::CompilationResult result = compiler.Assemble(
+ isolate, macro_assembler.get(), node, data->capture_count, pattern);
+
+ if (FLAG_correctness_fuzzer_suppressions &&
+ strncmp(result.error_message, "Stack overflow", 15) == 0) {
+ FATAL("Aborting on stack overflow");
+ }
+
+ if (result.error_message != nullptr) {
+ data->error =
+ isolate->factory()->NewStringFromAsciiChecked(result.error_message);
+ }
+ data->code = result.code;
+ data->register_count = result.num_registers;
+
+ return result.Succeeded();
+}
+
+RegExpGlobalCache::RegExpGlobalCache(Handle<JSRegExp> regexp,
+ Handle<String> subject, Isolate* isolate)
+ : register_array_(nullptr),
+ register_array_size_(0),
+ regexp_(regexp),
+ subject_(subject),
+ isolate_(isolate) {
+ bool interpreted = FLAG_regexp_interpret_all;
+
+ if (regexp_->TypeTag() == JSRegExp::ATOM) {
+ static const int kAtomRegistersPerMatch = 2;
+ registers_per_match_ = kAtomRegistersPerMatch;
+ // There is no distinction between interpreted and native for atom regexps.
+ interpreted = false;
+ } else {
+ registers_per_match_ = RegExp::IrregexpPrepare(isolate_, regexp_, subject_);
+ if (registers_per_match_ < 0) {
+ num_matches_ = -1; // Signal exception.
+ return;
+ }
+ }
+
+ DCHECK(IsGlobal(regexp->GetFlags()));
+ if (!interpreted) {
+ register_array_size_ =
+ Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize);
+ max_matches_ = register_array_size_ / registers_per_match_;
+ } else {
+ // A global loop is not implemented for interpreted regexps. We choose
+ // the size of the offsets vector so that it can only store one match.
+ register_array_size_ = registers_per_match_;
+ max_matches_ = 1;
+ }
+
+ if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ register_array_ = NewArray<int32_t>(register_array_size_);
+ } else {
+ register_array_ = isolate->jsregexp_static_offsets_vector();
+ }
+
+ // Set state so that fetching the results the first time triggers a call
+ // to the compiled regexp.
+ current_match_index_ = max_matches_ - 1;
+ num_matches_ = max_matches_;
+ DCHECK_LE(2, registers_per_match_); // Each match has at least one capture.
+ DCHECK_GE(register_array_size_, registers_per_match_);
+ int32_t* last_match =
+ &register_array_[current_match_index_ * registers_per_match_];
+ last_match[0] = -1;
+ last_match[1] = 0;
+}
+
+RegExpGlobalCache::~RegExpGlobalCache() {
+ // Deallocate the register array if we allocated it in the constructor
+ // (as opposed to using the existing jsregexp_static_offsets_vector).
+ if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ DeleteArray(register_array_);
+ }
+}
+
+int RegExpGlobalCache::AdvanceZeroLength(int last_index) {
+ if (IsUnicode(regexp_->GetFlags()) && last_index + 1 < subject_->length() &&
+ unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
+ unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
+ // Advance over the surrogate pair.
+ return last_index + 2;
+ }
+ return last_index + 1;
+}
+
+int32_t* RegExpGlobalCache::FetchNext() {
+ current_match_index_++;
+ if (current_match_index_ >= num_matches_) {
+ // Current batch of results exhausted.
+ // Fail if last batch was not even fully filled.
+ if (num_matches_ < max_matches_) {
+ num_matches_ = 0; // Signal failed match.
+ return nullptr;
+ }
+
+ int32_t* last_match =
+ &register_array_[(current_match_index_ - 1) * registers_per_match_];
+ int last_end_index = last_match[1];
+
+ if (regexp_->TypeTag() == JSRegExp::ATOM) {
+ num_matches_ =
+ RegExpImpl::AtomExecRaw(isolate_, regexp_, subject_, last_end_index,
+ register_array_, register_array_size_);
+ } else {
+ int last_start_index = last_match[0];
+ if (last_start_index == last_end_index) {
+ // Zero-length match. Advance by one code point.
+ last_end_index = AdvanceZeroLength(last_end_index);
+ }
+ if (last_end_index > subject_->length()) {
+ num_matches_ = 0; // Signal failed match.
+ return nullptr;
+ }
+ num_matches_ = RegExpImpl::IrregexpExecRaw(
+ isolate_, regexp_, subject_, last_end_index, register_array_,
+ register_array_size_);
+ }
+
+ if (num_matches_ <= 0) return nullptr;
+ current_match_index_ = 0;
+ return register_array_;
+ } else {
+ return &register_array_[current_match_index_ * registers_per_match_];
+ }
+}
+
+int32_t* RegExpGlobalCache::LastSuccessfulMatch() {
+ int index = current_match_index_ * registers_per_match_;
+ if (num_matches_ == 0) {
+ // After a failed match we shift back by one result.
+ index -= registers_per_match_;
+ }
+ return &register_array_[index];
+}
+
+Object RegExpResultsCache::Lookup(Heap* heap, String key_string,
+ Object key_pattern,
+ FixedArray* last_match_cache,
+ ResultsCacheType type) {
+ FixedArray cache;
+ if (!key_string.IsInternalizedString()) return Smi::kZero;
+ if (type == STRING_SPLIT_SUBSTRINGS) {
+ DCHECK(key_pattern.IsString());
+ if (!key_pattern.IsInternalizedString()) return Smi::kZero;
+ cache = heap->string_split_cache();
+ } else {
+ DCHECK(type == REGEXP_MULTIPLE_INDICES);
+ DCHECK(key_pattern.IsFixedArray());
+ cache = heap->regexp_multiple_cache();
+ }
+
+ uint32_t hash = key_string.Hash();
+ uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
+ ~(kArrayEntriesPerCacheEntry - 1));
+ if (cache.get(index + kStringOffset) != key_string ||
+ cache.get(index + kPatternOffset) != key_pattern) {
+ index =
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+ if (cache.get(index + kStringOffset) != key_string ||
+ cache.get(index + kPatternOffset) != key_pattern) {
+ return Smi::kZero;
+ }
+ }
+
+ *last_match_cache = FixedArray::cast(cache.get(index + kLastMatchOffset));
+ return cache.get(index + kArrayOffset);
+}
+
+void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
+ Handle<Object> key_pattern,
+ Handle<FixedArray> value_array,
+ Handle<FixedArray> last_match_cache,
+ ResultsCacheType type) {
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> cache;
+ if (!key_string->IsInternalizedString()) return;
+ if (type == STRING_SPLIT_SUBSTRINGS) {
+ DCHECK(key_pattern->IsString());
+ if (!key_pattern->IsInternalizedString()) return;
+ cache = factory->string_split_cache();
+ } else {
+ DCHECK(type == REGEXP_MULTIPLE_INDICES);
+ DCHECK(key_pattern->IsFixedArray());
+ cache = factory->regexp_multiple_cache();
+ }
+
+ uint32_t hash = key_string->Hash();
+ uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
+ ~(kArrayEntriesPerCacheEntry - 1));
+ if (cache->get(index + kStringOffset) == Smi::kZero) {
+ cache->set(index + kStringOffset, *key_string);
+ cache->set(index + kPatternOffset, *key_pattern);
+ cache->set(index + kArrayOffset, *value_array);
+ cache->set(index + kLastMatchOffset, *last_match_cache);
+ } else {
+ uint32_t index2 =
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+ if (cache->get(index2 + kStringOffset) == Smi::kZero) {
+ cache->set(index2 + kStringOffset, *key_string);
+ cache->set(index2 + kPatternOffset, *key_pattern);
+ cache->set(index2 + kArrayOffset, *value_array);
+ cache->set(index2 + kLastMatchOffset, *last_match_cache);
+ } else {
+ cache->set(index2 + kStringOffset, Smi::kZero);
+ cache->set(index2 + kPatternOffset, Smi::kZero);
+ cache->set(index2 + kArrayOffset, Smi::kZero);
+ cache->set(index2 + kLastMatchOffset, Smi::kZero);
+ cache->set(index + kStringOffset, *key_string);
+ cache->set(index + kPatternOffset, *key_pattern);
+ cache->set(index + kArrayOffset, *value_array);
+ cache->set(index + kLastMatchOffset, *last_match_cache);
+ }
+ }
+ // If the array is a reasonably short list of substrings, convert it into a
+ // list of internalized strings.
+ if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
+ for (int i = 0; i < value_array->length(); i++) {
+ Handle<String> str(String::cast(value_array->get(i)), isolate);
+ Handle<String> internalized_str = factory->InternalizeString(str);
+ value_array->set(i, *internalized_str);
+ }
+ }
+ // Convert backing store to a copy-on-write array.
+ value_array->set_map_no_write_barrier(
+ ReadOnlyRoots(isolate).fixed_cow_array_map());
+}
+
+void RegExpResultsCache::Clear(FixedArray cache) {
+ for (int i = 0; i < kRegExpResultsCacheSize; i++) {
+ cache.set(i, Smi::kZero);
+ }
+}
+
+} // namespace internal
+} // namespace v8
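HasFewDifferentCharacters above decides whether an atom pattern is "low alphabet" and therefore better served by the regexp engine than by a plain substring search: it samples the pattern and declares it low-alphabet only if it is long enough and uses at least three times as many characters as it has distinct characters (taken mod 128). A stand-alone sketch of the same heuristic, with illustrative length limits in place of V8's Boyer-Moore constants, might look like this:

#include <algorithm>
#include <iostream>
#include <string>

// Sketch only; kMaxPrefix and kTooShort are illustrative, not V8's constants.
bool HasFewDifferentCharacters(const std::string& pattern) {
  constexpr int kMaxPrefix = 256;  // Cap on how much of the pattern we sample.
  constexpr int kTooShort = 2;     // Very short patterns are never low-alphabet.
  const int length =
      std::min<int>(kMaxPrefix, static_cast<int>(pattern.size()));
  if (length <= kTooShort) return false;

  constexpr int kMod = 128;
  bool character_found[kMod] = {false};
  int different = 0;
  for (int i = 0; i < length; i++) {
    int ch = pattern[i] & (kMod - 1);
    if (!character_found[ch]) {
      character_found[ch] = true;
      different++;
      // Bail out as soon as the pattern is clearly not low-alphabet.
      if (different * 3 > length) return false;
    }
  }
  return true;
}

int main() {
  std::cout << HasFewDifferentCharacters("aabbaabbaabb") << "\n";  // 1
  std::cout << HasFewDifferentCharacters("abcdefgh") << "\n";      // 0
  return 0;
}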
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
new file mode 100644
index 0000000000..0f3ed463da
--- /dev/null
+++ b/deps/v8/src/regexp/regexp.h
@@ -0,0 +1,177 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_H_
+#define V8_REGEXP_REGEXP_H_
+
+#include "src/objects/js-regexp.h"
+
+namespace v8 {
+namespace internal {
+
+class RegExpNode;
+class RegExpTree;
+
+// TODO(jgruber): Consider splitting between ParseData and CompileData.
+struct RegExpCompileData {
+ // The parsed AST as produced by the RegExpParser.
+ RegExpTree* tree = nullptr;
+
+ // The compiled Node graph as produced by RegExpTree::ToNode methods.
+ RegExpNode* node = nullptr;
+
+ // The generated code as produced by the compiler. Either a Code object (for
+ // irregexp native code) or a ByteArray (for irregexp bytecode).
+ Object code;
+
+ // True iff the pattern is a 'simple' atom with zero captures. In other
+ // words, the pattern consists of a string with no metacharacters or other
+ // special regexp features, and can be implemented as a standard string
+ // search.
+ bool simple = true;
+
+ // True iff the pattern is anchored at the start of the string with '^'.
+ bool contains_anchor = false;
+
+ // Only used if the pattern contains named captures; in that case this holds
+ // a mapping of capture names to capture indices.
+ Handle<FixedArray> capture_name_map;
+
+ // The error message. Only used if an error occurred during parsing or
+ // compilation.
+ Handle<String> error;
+
+ // The number of capture groups, without the global capture \0.
+ int capture_count = 0;
+
+ // The number of registers used by the generated code.
+ int register_count = 0;
+};
+
+class RegExp final : public AllStatic {
+ public:
+ // Whether the irregexp engine generates native code or interpreter bytecode.
+ static bool GeneratesNativeCode() { return !FLAG_regexp_interpret_all; }
+
+ // Parses the RegExp pattern and prepares the JSRegExp object with
+ // generic data and choice of implementation, as well as what
+ // the implementation wants to store in the data field.
+ // Returns an empty handle if compilation fails.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Compile(
+ Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
+ JSRegExp::Flags flags);
+
+ // See ECMA-262 section 15.10.6.2.
+ // This function calls the garbage collector if necessary.
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Exec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info);
+
+ // Integral return values used throughout regexp code layers.
+ static constexpr int kInternalRegExpFailure = 0;
+ static constexpr int kInternalRegExpSuccess = 1;
+ static constexpr int kInternalRegExpException = -1;
+ static constexpr int kInternalRegExpRetry = -2;
+
+ enum IrregexpResult {
+ RE_FAILURE = kInternalRegExpFailure,
+ RE_SUCCESS = kInternalRegExpSuccess,
+ RE_EXCEPTION = kInternalRegExpException,
+ };
+
+ // Prepare a RegExp for being executed one or more times (using
+ // IrregexpExecRaw) on the subject.
+ // This ensures that the regexp is compiled for the subject, and that
+ // the subject is flat.
+ // Returns the number of integer spaces required by IrregexpExecRaw
+ // as its "registers" argument. If the regexp cannot be compiled,
+ // an exception is set as pending, and this function returns a negative value.
+ static int IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject);
+
+ // Set last match info. If match is nullptr, then setting captures is
+ // omitted.
+ static Handle<RegExpMatchInfo> SetLastMatchInfo(
+ Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
+ Handle<String> subject, int capture_count, int32_t* match);
+
+ V8_EXPORT_PRIVATE static bool CompileForTesting(Isolate* isolate, Zone* zone,
+ RegExpCompileData* input,
+ JSRegExp::Flags flags,
+ Handle<String> pattern,
+ Handle<String> sample_subject,
+ bool is_one_byte);
+
+ V8_EXPORT_PRIVATE static void DotPrintForTesting(const char* label,
+ RegExpNode* node);
+
+ static const int kRegExpTooLargeToOptimize = 20 * KB;
+};
+
+// Uses a special global mode of irregexp-generated code to perform a global
+// search and return multiple results at once. As such, this is essentially an
+// iterator over multiple results (retrieved batch-wise in advance).
+class RegExpGlobalCache final {
+ public:
+ RegExpGlobalCache(Handle<JSRegExp> regexp, Handle<String> subject,
+ Isolate* isolate);
+
+ ~RegExpGlobalCache();
+
+ // Fetch the next entry in the cache for global regexp match results.
+ // This does not set the last match info. Upon failure, nullptr is
+ // returned. The cause can be checked with HasException(). The previous
+ // result is still available in memory when a failure happens.
+ int32_t* FetchNext();
+
+ int32_t* LastSuccessfulMatch();
+
+ bool HasException() { return num_matches_ < 0; }
+
+ private:
+ int AdvanceZeroLength(int last_index);
+
+ int num_matches_;
+ int max_matches_;
+ int current_match_index_;
+ int registers_per_match_;
+ // Pointer to the last set of captures.
+ int32_t* register_array_;
+ int register_array_size_;
+ Handle<JSRegExp> regexp_;
+ Handle<String> subject_;
+ Isolate* isolate_;
+};
+
+// Caches results for specific regexp queries on the isolate. At the time of
+// writing, this is used during global calls to RegExp.prototype.exec and
+// @@split.
+class RegExpResultsCache final : public AllStatic {
+ public:
+ enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
+
+ // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
+ // On success, the returned result is guaranteed to be a COW-array.
+ static Object Lookup(Heap* heap, String key_string, Object key_pattern,
+ FixedArray* last_match_out, ResultsCacheType type);
+ // Attempt to add value_array to the cache specified by type. On success,
+ // value_array is turned into a COW-array.
+ static void Enter(Isolate* isolate, Handle<String> key_string,
+ Handle<Object> key_pattern, Handle<FixedArray> value_array,
+ Handle<FixedArray> last_match_cache, ResultsCacheType type);
+ static void Clear(FixedArray cache);
+
+ static constexpr int kRegExpResultsCacheSize = 0x100;
+
+ private:
+ static constexpr int kStringOffset = 0;
+ static constexpr int kPatternOffset = 1;
+ static constexpr int kArrayOffset = 2;
+ static constexpr int kLastMatchOffset = 3;
+ static constexpr int kArrayEntriesPerCacheEntry = 4;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_H_
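RegExpResultsCache above stores four array slots per entry (string key, pattern key, result array, last-match data), picks an entry from the key string's hash, and probes exactly one fallback entry before evicting. A self-contained sketch of that two-probe cache layout, with simplified stand-in types and std::hash in place of V8's string hash, could look like this:

#include <array>
#include <cassert>
#include <cstdint>
#include <functional>
#include <string>

// Sketch only; Entry, kCacheEntries and the hash are illustrative stand-ins.
struct Entry {
  std::string key_string;
  std::string key_pattern;
  int value = 0;
  bool used = false;
};

constexpr uint32_t kCacheEntries = 0x100;

uint32_t IndexFor(const std::string& key) {
  return std::hash<std::string>{}(key) & (kCacheEntries - 1);
}

int* Lookup(std::array<Entry, kCacheEntries>& cache,
            const std::string& key_string, const std::string& key_pattern) {
  uint32_t index = IndexFor(key_string);
  for (uint32_t probe = 0; probe < 2; probe++) {  // Primary, then one fallback.
    Entry& e = cache[(index + probe) & (kCacheEntries - 1)];
    if (e.used && e.key_string == key_string && e.key_pattern == key_pattern) {
      return &e.value;
    }
  }
  return nullptr;
}

void Enter(std::array<Entry, kCacheEntries>& cache,
           const std::string& key_string, const std::string& key_pattern,
           int value) {
  uint32_t index = IndexFor(key_string);
  uint32_t second = (index + 1) & (kCacheEntries - 1);
  // Prefer an unused primary slot, then an unused secondary slot; otherwise
  // clear the secondary slot and install the new entry in the primary one,
  // mirroring the eviction order in RegExpResultsCache::Enter.
  uint32_t target = !cache[index].used ? index : second;
  if (cache[index].used && cache[second].used) {
    cache[second] = Entry{};
    target = index;
  }
  cache[target] = Entry{key_string, key_pattern, value, true};
}

int main() {
  std::array<Entry, kCacheEntries> cache{};
  Enter(cache, "a,b,c", ",", 3);
  assert(Lookup(cache, "a,b,c", ",") != nullptr);
  assert(Lookup(cache, "a,b,c", ";") == nullptr);
  return 0;
}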
diff --git a/deps/v8/src/roots/OWNERS b/deps/v8/src/roots/OWNERS
new file mode 100644
index 0000000000..2d6e1ae7c2
--- /dev/null
+++ b/deps/v8/src/roots/OWNERS
@@ -0,0 +1,11 @@
+bmeurer@chromium.org
+delphick@chromium.org
+hpayer@chromium.org
+ishell@chromium.org
+jgruber@chromium.org
+jkummerow@chromium.org
+marja@chromium.org
+sigurds@chromium.org
+ulan@chromium.org
+
+# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/roots/roots-inl.h b/deps/v8/src/roots/roots-inl.h
index 8153f1758f..4513f7ba97 100644
--- a/deps/v8/src/roots/roots-inl.h
+++ b/deps/v8/src/roots/roots-inl.h
@@ -31,7 +31,8 @@ V8_INLINE constexpr bool operator<(RootIndex lhs, RootIndex rhs) {
return static_cast<type>(lhs) < static_cast<type>(rhs);
}
-V8_INLINE RootIndex operator++(RootIndex& index) {
+V8_INLINE RootIndex
+operator++(RootIndex& index) { // NOLINT(runtime/references)
using type = typename std::underlying_type<RootIndex>::type;
index = static_cast<RootIndex>(static_cast<type>(index) + 1);
return index;
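The operator++ change above only rewraps the declaration for the linter; the underlying pattern is defining a pre-increment on a scoped enum via its underlying type so that RootIndex can drive range-style loops. A self-contained sketch of that pattern with an illustrative enum (not V8's RootIndex) is:

#include <iostream>
#include <type_traits>

// Sketch only; Color stands in for a scoped enum used as a loop index.
enum class Color : int { kRed, kGreen, kBlue, kCount };

inline Color operator++(Color& c) {
  using type = std::underlying_type<Color>::type;
  c = static_cast<Color>(static_cast<type>(c) + 1);
  return c;
}

int main() {
  for (Color c = Color::kRed; c != Color::kCount; ++c) {
    std::cout << static_cast<int>(c) << "\n";
  }
  return 0;
}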
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 5684c28f4e..e6bcd94c01 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -117,7 +117,9 @@ class Symbol;
V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
V(Map, small_ordered_name_dictionary_map, SmallOrderedNameDictionaryMap) \
+ V(Map, source_text_module_map, SourceTextModuleMap) \
V(Map, string_table_map, StringTableMap) \
+ V(Map, synthetic_module_map, SyntheticModuleMap) \
V(Map, uncompiled_data_without_preparse_data_map, \
UncompiledDataWithoutPreparseDataMap) \
V(Map, uncompiled_data_with_preparse_data_map, \
@@ -217,7 +219,6 @@ class Symbol;
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
- V(PropertyCell, regexp_species_protector, RegExpSpeciesProtector) \
V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \
V(Cell, string_length_protector, StringLengthProtector) \
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
diff --git a/deps/v8/src/runtime/OWNERS b/deps/v8/src/runtime/OWNERS
new file mode 100644
index 0000000000..450423f878
--- /dev/null
+++ b/deps/v8/src/runtime/OWNERS
@@ -0,0 +1,3 @@
+file://COMMON_OWNERS
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 0c17047795..522e93da3f 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -8,10 +8,10 @@
#include <limits>
#include "src/builtins/accessors.h"
+#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/elements.h"
@@ -150,8 +150,9 @@ inline void SetHomeObject(Isolate* isolate, JSFunction method,
// shared name.
template <typename Dictionary>
MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
- Isolate* isolate, Arguments& args, Smi index, Handle<JSObject> home_object,
- Handle<String> name_prefix, Handle<Object> key) {
+ Isolate* isolate, Arguments& args, // NOLINT(runtime/references)
+ Smi index, Handle<JSObject> home_object, Handle<String> name_prefix,
+ Handle<Object> key) {
int int_index = index.value();
// Class constructor and prototype values do not require post processing.
@@ -185,9 +186,10 @@ MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
// This is a simplified version of GetMethodWithSharedNameAndSetHomeObject()
// function above that is used when it's guaranteed that the method has
// shared name.
-Object GetMethodWithSharedNameAndSetHomeObject(Isolate* isolate,
- Arguments& args, Object index,
- JSObject home_object) {
+Object GetMethodWithSharedNameAndSetHomeObject(
+ Isolate* isolate,
+ Arguments& args, // NOLINT(runtime/references)
+ Object index, JSObject home_object) {
DisallowHeapAllocation no_gc;
int int_index = Smi::ToInt(index);
@@ -226,7 +228,8 @@ Handle<Dictionary> ShallowCopyDictionaryTemplate(
template <typename Dictionary>
bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
- Handle<JSObject> receiver, Arguments& args,
+ Handle<JSObject> receiver,
+ Arguments& args, // NOLINT(runtime/references)
bool* install_name_accessor = nullptr) {
Handle<Name> name_string = isolate->factory()->name_string();
@@ -284,7 +287,7 @@ bool AddDescriptorsByTemplate(
Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors_template,
Handle<NumberDictionary> elements_dictionary_template,
- Handle<JSObject> receiver, Arguments& args) {
+ Handle<JSObject> receiver, Arguments& args) { // NOLINT(runtime/references)
int nof_descriptors = descriptors_template->number_of_descriptors();
Handle<DescriptorArray> descriptors =
@@ -329,7 +332,8 @@ bool AddDescriptorsByTemplate(
value = GetMethodWithSharedNameAndSetHomeObject(isolate, args, value,
*receiver);
}
- details = details.CopyWithRepresentation(value.OptimalRepresentation());
+ details = details.CopyWithRepresentation(
+ value.OptimalRepresentation(isolate));
} else {
DCHECK_EQ(kAccessor, details.kind());
if (value.IsAccessorPair()) {
@@ -391,7 +395,8 @@ bool AddDescriptorsByTemplate(
Handle<NameDictionary> properties_dictionary_template,
Handle<NumberDictionary> elements_dictionary_template,
Handle<FixedArray> computed_properties, Handle<JSObject> receiver,
- bool install_name_accessor, Arguments& args) {
+ bool install_name_accessor,
+ Arguments& args) { // NOLINT(runtime/references)
int computed_properties_length = computed_properties->length();
// Shallow-copy properties template.
@@ -476,7 +481,8 @@ bool InitClassPrototype(Isolate* isolate,
Handle<ClassBoilerplate> class_boilerplate,
Handle<JSObject> prototype,
Handle<HeapObject> prototype_parent,
- Handle<JSFunction> constructor, Arguments& args) {
+ Handle<JSFunction> constructor,
+ Arguments& args) { // NOLINT(runtime/references)
Handle<Map> map(prototype->map(), isolate);
map = Map::CopyDropDescriptors(isolate, map);
map->set_is_prototype_map(true);
@@ -523,7 +529,8 @@ bool InitClassPrototype(Isolate* isolate,
bool InitClassConstructor(Isolate* isolate,
Handle<ClassBoilerplate> class_boilerplate,
Handle<HeapObject> constructor_parent,
- Handle<JSFunction> constructor, Arguments& args) {
+ Handle<JSFunction> constructor,
+ Arguments& args) { // NOLINT(runtime/references)
Handle<Map> map(constructor->map(), isolate);
map = Map::CopyDropDescriptors(isolate, map);
DCHECK(map->is_prototype_map());
@@ -572,11 +579,10 @@ bool InitClassConstructor(Isolate* isolate,
}
}
-MaybeHandle<Object> DefineClass(Isolate* isolate,
- Handle<ClassBoilerplate> class_boilerplate,
- Handle<Object> super_class,
- Handle<JSFunction> constructor,
- Arguments& args) {
+MaybeHandle<Object> DefineClass(
+ Isolate* isolate, Handle<ClassBoilerplate> class_boilerplate,
+ Handle<Object> super_class, Handle<JSFunction> constructor,
+ Arguments& args) { // NOLINT(runtime/references)
Handle<Object> prototype_parent;
Handle<HeapObject> constructor_parent;
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index b3b51ecc07..19c6f8bff5 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -4,12 +4,12 @@
#include "src/asmjs/asm-js.h"
#include "src/codegen/compiler.h"
+#include "src/common/message-template.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/objects/js-array-buffer-inl.h"
@@ -294,7 +294,8 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
return Object();
}
-static Object CompileGlobalEval(Isolate* isolate, Handle<String> source,
+static Object CompileGlobalEval(Isolate* isolate,
+ Handle<i::Object> source_object,
Handle<SharedFunctionInfo> outer_info,
LanguageMode language_mode,
int eval_scope_position, int eval_position) {
@@ -303,9 +304,15 @@ static Object CompileGlobalEval(Isolate* isolate, Handle<String> source,
// Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
- if (native_context->allow_code_gen_from_strings().IsFalse(isolate) &&
- !Compiler::CodeGenerationFromStringsAllowed(isolate, native_context,
- source)) {
+ MaybeHandle<String> source;
+ bool unknown_object;
+ std::tie(source, unknown_object) = Compiler::ValidateDynamicCompilationSource(
+ isolate, native_context, source_object);
+  // If the argument is an unhandled string type, bounce to GlobalEval.
+ if (unknown_object) {
+ return native_context->global_eval_fun();
+ }
+ if (source.is_null()) {
Handle<Object> error_message =
native_context->ErrorMessageForCodeGenerationFromStrings();
Handle<Object> error;
@@ -321,9 +328,9 @@ static Object CompileGlobalEval(Isolate* isolate, Handle<String> source,
Handle<JSFunction> compiled;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, compiled,
- Compiler::GetFunctionFromEval(source, outer_info, context, language_mode,
- restriction, kNoSourcePosition,
- eval_scope_position, eval_position),
+ Compiler::GetFunctionFromEval(
+ source.ToHandleChecked(), outer_info, context, language_mode,
+ restriction, kNoSourcePosition, eval_scope_position, eval_position),
ReadOnlyRoots(isolate).exception());
return *compiled;
}
@@ -336,11 +343,7 @@ RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
// If "eval" didn't refer to the original GlobalEval, it's not a
// direct call to eval.
- // (And even if it is, but the first argument isn't a string, just let
- // execution default to an indirect call to eval, which will also return
- // the first argument without doing anything).
- if (*callee != isolate->native_context()->global_eval_fun() ||
- !args[1].IsString()) {
+ if (*callee != isolate->native_context()->global_eval_fun()) {
return *callee;
}
@@ -350,7 +353,7 @@ RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
DCHECK(args[4].IsSmi());
Handle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
isolate);
- return CompileGlobalEval(isolate, args.at<String>(1), outer_info,
+ return CompileGlobalEval(isolate, args.at<Object>(1), outer_info,
language_mode, args.smi_at(4), args.smi_at(5));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index afe4a921e6..94320740af 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -283,8 +283,9 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(4, *is_revoked_str);
result->set(5, isolate->heap()->ToBoolean(js_proxy->IsRevoked()));
return factory->NewJSArrayWithElements(result);
- } else if (object->IsJSValue()) {
- Handle<JSValue> js_value = Handle<JSValue>::cast(object);
+ } else if (object->IsJSPrimitiveWrapper()) {
+ Handle<JSPrimitiveWrapper> js_value =
+ Handle<JSPrimitiveWrapper>::cast(object);
Handle<FixedArray> result = factory->NewFixedArray(2);
Handle<String> primitive_value =
@@ -750,6 +751,23 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionEntered) {
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionSuspended) {
+ DCHECK_EQ(1, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ isolate->PopPromise();
+ isolate->OnAsyncFunctionStateChanged(promise, debug::kAsyncFunctionSuspended);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionResumed) {
+ DCHECK_EQ(1, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ isolate->PushPromise(promise);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionFinished) {
DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
@@ -763,14 +781,6 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionFinished) {
return *promise;
}
-RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionSuspended) {
- DCHECK_EQ(1, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- isolate->OnAsyncFunctionStateChanged(promise, debug::kAsyncFunctionSuspended);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_LiveEditPatchScript) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 069ea88e12..7225e43012 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -65,6 +65,7 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
generator->set_context(isolate->context());
generator->set_receiver(*receiver);
generator->set_parameters_and_registers(*parameters_and_registers);
+ generator->set_resume_mode(JSGeneratorObject::ResumeMode::kNext);
generator->set_continuation(JSGeneratorObject::kGeneratorExecuting);
if (generator->IsJSAsyncGeneratorObject()) {
Handle<JSAsyncGeneratorObject>::cast(generator)->set_is_awaiting(0);
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 21b1b1ef7c..4b8a0e38a1 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -8,11 +8,11 @@
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/prettyprinter.h"
#include "src/builtins/builtins.h"
+#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/numbers/conversions.h"
@@ -94,6 +94,22 @@ RUNTIME_FUNCTION(Runtime_ThrowSymbolAsyncIteratorInvalid) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate, call(message_id, arg0, arg1, arg2));
RUNTIME_FUNCTION(Runtime_ThrowRangeError) {
+ if (FLAG_correctness_fuzzer_suppressions) {
+ DCHECK_LE(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(message_id_smi, 0);
+
+ // If the result of a BigInt computation is truncated to 64 bit, Turbofan
+ // can sometimes truncate intermediate results already, which can prevent
+ // those from exceeding the maximum length, effectively preventing a
+ // RangeError from being thrown. As this is a performance optimization, this
+ // behavior is accepted. To prevent the correctness fuzzer from detecting
+ // this difference, we crash the program.
+ if (MessageTemplateFromInt(message_id_smi) ==
+ MessageTemplate::kBigIntTooBig) {
+ FATAL("Aborting on invalid BigInt length");
+ }
+ }
+
THROW_ERROR(isolate, args, NewRangeError);
}
@@ -287,13 +303,25 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) {
RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
+ DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
+ CONVERT_SMI_ARG_CHECKED(flags, 1);
+ bool double_align = AllocateDoubleAlignFlag::decode(flags);
+ bool allow_large_object_allocation =
+ AllowLargeObjectAllocationFlag::decode(flags);
CHECK(IsAligned(size, kTaggedSize));
CHECK_GT(size, 0);
CHECK(FLAG_young_generation_large_objects ||
size <= kMaxRegularHeapObjectSize);
- return *isolate->factory()->NewFillerObject(size, false,
+ if (!allow_large_object_allocation) {
+ CHECK(size <= kMaxRegularHeapObjectSize);
+ }
+
+ // TODO(v8:9472): Until double-aligned allocation is fixed for new-space
+ // allocations, don't request it.
+ double_align = false;
+
+ return *isolate->factory()->NewFillerObject(size, double_align,
AllocationType::kYoung);
}
@@ -302,9 +330,14 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
+ bool double_align = AllocateDoubleAlignFlag::decode(flags);
+ bool allow_large_object_allocation =
+ AllowLargeObjectAllocationFlag::decode(flags);
CHECK(IsAligned(size, kTaggedSize));
CHECK_GT(size, 0);
- bool double_align = AllocateDoubleAlignFlag::decode(flags);
+ if (!allow_large_object_allocation) {
+ CHECK(size <= kMaxRegularHeapObjectSize);
+ }
return *isolate->factory()->NewFillerObject(size, double_align,
AllocationType::kOld);
}
@@ -695,7 +728,8 @@ RUNTIME_FUNCTION(Runtime_GetTemplateObject) {
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared_info, 1);
CONVERT_SMI_ARG_CHECKED(slot_id, 2);
- Handle<Context> native_context(isolate->context().native_context(), isolate);
+ Handle<NativeContext> native_context(isolate->context().native_context(),
+ isolate);
return *TemplateObjectDescription::GetTemplateObject(
isolate, native_context, description, shared_info, slot_id);
}
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 48b4d2b6e7..1632554130 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -26,7 +26,9 @@ namespace internal {
namespace {
void AdvanceToOffsetForTracing(
- interpreter::BytecodeArrayIterator& bytecode_iterator, int offset) {
+ interpreter::BytecodeArrayIterator&
+ bytecode_iterator, // NOLINT(runtime/references)
+ int offset) {
while (bytecode_iterator.current_offset() +
bytecode_iterator.current_bytecode_size() <=
offset) {
@@ -39,7 +41,8 @@ void AdvanceToOffsetForTracing(
}
void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
- interpreter::BytecodeArrayIterator& bytecode_iterator,
+ interpreter::BytecodeArrayIterator&
+ bytecode_iterator, // NOLINT(runtime/references)
Handle<Object> accumulator) {
static const char kAccumulator[] = "accumulator";
static const int kRegFieldWidth = static_cast<int>(sizeof(kAccumulator) - 1);
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 67aa097484..0c7a28c279 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -33,10 +33,6 @@ void PreInitializeLiteralSite(Handle<FeedbackVector> vector,
vector->Set(slot, Smi::FromInt(1));
}
-Handle<Object> InnerCreateBoilerplate(Isolate* isolate,
- Handle<Object> description,
- AllocationType allocation);
-
enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
template <class ContextObject>
@@ -86,14 +82,14 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
}
- if (object->map().is_deprecated()) {
- JSObject::MigrateInstance(object);
+ if (object->map(isolate).is_deprecated()) {
+ JSObject::MigrateInstance(isolate, object);
}
Handle<JSObject> copy;
if (copying) {
// JSFunction objects are not allowed to be in normal boilerplates at all.
- DCHECK(!object->IsJSFunction());
+ DCHECK(!object->IsJSFunction(isolate));
Handle<AllocationSite> site_to_pass;
if (site_context()->ShouldCreateMemento(object)) {
site_to_pass = site_context()->current();
@@ -111,23 +107,23 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
HandleScope scope(isolate);
// Deep copy own properties. Arrays only have 1 property "length".
- if (!copy->IsJSArray()) {
- if (copy->HasFastProperties()) {
- Handle<DescriptorArray> descriptors(copy->map().instance_descriptors(),
- isolate);
- int limit = copy->map().NumberOfOwnDescriptors();
+ if (!copy->IsJSArray(isolate)) {
+ if (copy->HasFastProperties(isolate)) {
+ Handle<DescriptorArray> descriptors(
+ copy->map(isolate).instance_descriptors(isolate), isolate);
+ int limit = copy->map(isolate).NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
DCHECK_EQ(kField, descriptors->GetDetails(i).location());
DCHECK_EQ(kData, descriptors->GetDetails(i).kind());
- FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
- if (copy->IsUnboxedDoubleField(index)) continue;
- Object raw = copy->RawFastPropertyAt(index);
- if (raw.IsJSObject()) {
+ FieldIndex index = FieldIndex::ForDescriptor(copy->map(isolate), i);
+ if (copy->IsUnboxedDoubleField(isolate, index)) continue;
+ Object raw = copy->RawFastPropertyAt(isolate, index);
+ if (raw.IsJSObject(isolate)) {
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
if (copying) copy->FastPropertyAtPut(index, *value);
- } else if (copying && raw.IsMutableHeapNumber()) {
+ } else if (copying && raw.IsMutableHeapNumber(isolate)) {
DCHECK(descriptors->GetDetails(i).representation().IsDouble());
uint64_t double_value = MutableHeapNumber::cast(raw).value_as_bits();
auto value =
@@ -136,11 +132,12 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
}
} else {
- Handle<NameDictionary> dict(copy->property_dictionary(), isolate);
- for (int i = 0; i < dict->Capacity(); i++) {
- Object raw = dict->ValueAt(i);
- if (!raw.IsJSObject()) continue;
- DCHECK(dict->KeyAt(i).IsName());
+ Handle<NameDictionary> dict(copy->property_dictionary(isolate), isolate);
+ int capacity = dict->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object raw = dict->ValueAt(isolate, i);
+ if (!raw.IsJSObject(isolate)) continue;
+ DCHECK(dict->KeyAt(isolate, i).IsName());
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
@@ -149,19 +146,21 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
// Assume non-arrays don't end up having elements.
- if (copy->elements().length() == 0) return copy;
+ if (copy->elements(isolate).length() == 0) return copy;
}
// Deep copy own elements.
- switch (copy->GetElementsKind()) {
+ switch (copy->GetElementsKind(isolate)) {
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
case HOLEY_FROZEN_ELEMENTS:
case HOLEY_SEALED_ELEMENTS:
case HOLEY_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(copy->elements()), isolate);
- if (elements->map() == ReadOnlyRoots(isolate).fixed_cow_array_map()) {
+ Handle<FixedArray> elements(FixedArray::cast(copy->elements(isolate)),
+ isolate);
+ if (elements->map(isolate) ==
+ ReadOnlyRoots(isolate).fixed_cow_array_map()) {
#ifdef DEBUG
for (int i = 0; i < elements->length(); i++) {
DCHECK(!elements->get(i).IsJSObject());
@@ -169,8 +168,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
#endif
} else {
for (int i = 0; i < elements->length(); i++) {
- Object raw = elements->get(i);
- if (!raw.IsJSObject()) continue;
+ Object raw = elements->get(isolate, i);
+ if (!raw.IsJSObject(isolate)) continue;
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
@@ -180,12 +179,12 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
break;
}
case DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> element_dictionary(copy->element_dictionary(),
- isolate);
+ Handle<NumberDictionary> element_dictionary(
+ copy->element_dictionary(isolate), isolate);
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object raw = element_dictionary->ValueAt(i);
- if (!raw.IsJSObject()) continue;
+ Object raw = element_dictionary->ValueAt(isolate, i);
+ if (!raw.IsJSObject(isolate)) continue;
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
@@ -324,155 +323,182 @@ MaybeHandle<JSObject> DeepCopy(Handle<JSObject> object,
return copy;
}
+Handle<JSObject> CreateObjectLiteral(
+ Isolate* isolate,
+ Handle<ObjectBoilerplateDescription> object_boilerplate_description,
+ int flags, AllocationType allocation);
+
+Handle<JSObject> CreateArrayLiteral(
+ Isolate* isolate,
+ Handle<ArrayBoilerplateDescription> array_boilerplate_description,
+ AllocationType allocation);
+
struct ObjectLiteralHelper {
- static Handle<JSObject> Create(Isolate* isolate,
- Handle<HeapObject> description, int flags,
- AllocationType allocation) {
- Handle<NativeContext> native_context = isolate->native_context();
+ static inline Handle<JSObject> Create(Isolate* isolate,
+ Handle<HeapObject> description,
+ int flags, AllocationType allocation) {
Handle<ObjectBoilerplateDescription> object_boilerplate_description =
Handle<ObjectBoilerplateDescription>::cast(description);
- bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_null_prototype = (flags & ObjectLiteral::kHasNullPrototype) != 0;
-
- // In case we have function literals, we want the object to be in
- // slow properties mode for now. We don't go in the map cache because
- // maps with constant functions can't be shared if the functions are
- // not the same (which is the common case).
- int number_of_properties =
- object_boilerplate_description->backing_store_size();
-
- // Ignoring number_of_properties for force dictionary map with
- // __proto__:null.
- Handle<Map> map =
- has_null_prototype
- ? handle(native_context->slow_object_with_null_prototype_map(),
- isolate)
- : isolate->factory()->ObjectLiteralMapFromCache(
- native_context, number_of_properties);
-
- Handle<JSObject> boilerplate =
- map->is_dictionary_map()
- ? isolate->factory()->NewSlowJSObjectFromMap(
- map, number_of_properties, allocation)
- : isolate->factory()->NewJSObjectFromMap(map, allocation);
-
- // Normalize the elements of the boilerplate to save space if needed.
- if (!use_fast_elements) JSObject::NormalizeElements(boilerplate);
-
- // Add the constant properties to the boilerplate.
- int length = object_boilerplate_description->size();
- // TODO(verwaest): Support tracking representations in the boilerplate.
- for (int index = 0; index < length; index++) {
- Handle<Object> key(object_boilerplate_description->name(index), isolate);
- Handle<Object> value(object_boilerplate_description->value(index),
- isolate);
-
- if (value->IsObjectBoilerplateDescription() ||
- value->IsArrayBoilerplateDescription()) {
- value = InnerCreateBoilerplate(isolate, value, allocation);
- }
- uint32_t element_index = 0;
- if (key->ToArrayIndex(&element_index)) {
- // Array index (uint32).
- if (value->IsUninitialized(isolate)) {
- value = handle(Smi::kZero, isolate);
- }
- JSObject::SetOwnElementIgnoreAttributes(boilerplate, element_index,
- value, NONE)
- .Check();
- } else {
- Handle<String> name = Handle<String>::cast(key);
- DCHECK(!name->AsArrayIndex(&element_index));
- JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name, value, NONE)
- .Check();
- }
- }
-
- if (map->is_dictionary_map() && !has_null_prototype) {
- // TODO(cbruni): avoid making the boilerplate fast again, the clone stub
- // supports dict-mode objects directly.
- JSObject::MigrateSlowToFast(boilerplate,
- boilerplate->map().UnusedPropertyFields(),
- "FastLiteral");
- }
- return boilerplate;
+ return CreateObjectLiteral(isolate, object_boilerplate_description, flags,
+ allocation);
}
};
struct ArrayLiteralHelper {
- static Handle<JSObject> Create(Isolate* isolate,
- Handle<HeapObject> description, int flags,
- AllocationType allocation) {
+ static inline Handle<JSObject> Create(Isolate* isolate,
+ Handle<HeapObject> description,
+ int flags_not_used,
+ AllocationType allocation) {
Handle<ArrayBoilerplateDescription> array_boilerplate_description =
Handle<ArrayBoilerplateDescription>::cast(description);
+ return CreateArrayLiteral(isolate, array_boilerplate_description,
+ allocation);
+ }
+};
- ElementsKind constant_elements_kind =
- array_boilerplate_description->elements_kind();
-
- Handle<FixedArrayBase> constant_elements_values(
- array_boilerplate_description->constant_elements(), isolate);
+Handle<JSObject> CreateObjectLiteral(
+ Isolate* isolate,
+ Handle<ObjectBoilerplateDescription> object_boilerplate_description,
+ int flags, AllocationType allocation) {
+ Handle<NativeContext> native_context = isolate->native_context();
+ bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
+ bool has_null_prototype = (flags & ObjectLiteral::kHasNullPrototype) != 0;
+
+ // In case we have function literals, we want the object to be in
+ // slow properties mode for now. We don't go in the map cache because
+ // maps with constant functions can't be shared if the functions are
+ // not the same (which is the common case).
+ int number_of_properties =
+ object_boilerplate_description->backing_store_size();
+
+  // Ignoring number_of_properties to force a dictionary map with
+  // __proto__:null.
+ Handle<Map> map =
+ has_null_prototype
+ ? handle(native_context->slow_object_with_null_prototype_map(),
+ isolate)
+ : isolate->factory()->ObjectLiteralMapFromCache(native_context,
+ number_of_properties);
+
+ Handle<JSObject> boilerplate =
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(
+ map, number_of_properties, allocation);
+
+ // Normalize the elements of the boilerplate to save space if needed.
+ if (!use_fast_elements) JSObject::NormalizeElements(boilerplate);
+
+ // Add the constant properties to the boilerplate.
+ int length = object_boilerplate_description->size();
+ // TODO(verwaest): Support tracking representations in the boilerplate.
+ for (int index = 0; index < length; index++) {
+ Handle<Object> key(object_boilerplate_description->name(isolate, index),
+ isolate);
+ Handle<Object> value(object_boilerplate_description->value(isolate, index),
+ isolate);
+
+ if (value->IsHeapObject()) {
+ if (HeapObject::cast(*value).IsArrayBoilerplateDescription(isolate)) {
+ Handle<ArrayBoilerplateDescription> boilerplate =
+ Handle<ArrayBoilerplateDescription>::cast(value);
+ value = CreateArrayLiteral(isolate, boilerplate, allocation);
+
+ } else if (HeapObject::cast(*value).IsObjectBoilerplateDescription(
+ isolate)) {
+ Handle<ObjectBoilerplateDescription> boilerplate =
+ Handle<ObjectBoilerplateDescription>::cast(value);
+ value = CreateObjectLiteral(isolate, boilerplate, boilerplate->flags(),
+ allocation);
+ }
+ }
- // Create the JSArray.
- Handle<FixedArrayBase> copied_elements_values;
- if (IsDoubleElementsKind(constant_elements_kind)) {
- copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
- Handle<FixedDoubleArray>::cast(constant_elements_values));
+ uint32_t element_index = 0;
+ if (key->ToArrayIndex(&element_index)) {
+ // Array index (uint32).
+ if (value->IsUninitialized(isolate)) {
+ value = handle(Smi::kZero, isolate);
+ }
+ JSObject::SetOwnElementIgnoreAttributes(boilerplate, element_index, value,
+ NONE)
+ .Check();
} else {
- DCHECK(IsSmiOrObjectElementsKind(constant_elements_kind));
- const bool is_cow = (constant_elements_values->map() ==
- ReadOnlyRoots(isolate).fixed_cow_array_map());
- if (is_cow) {
- copied_elements_values = constant_elements_values;
-#if DEBUG
+ Handle<String> name = Handle<String>::cast(key);
+ DCHECK(!name->AsArrayIndex(&element_index));
+ JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name, value, NONE)
+ .Check();
+ }
+ }
+
+ if (map->is_dictionary_map() && !has_null_prototype) {
+ // TODO(cbruni): avoid making the boilerplate fast again, the clone stub
+ // supports dict-mode objects directly.
+ JSObject::MigrateSlowToFast(
+ boilerplate, boilerplate->map().UnusedPropertyFields(), "FastLiteral");
+ }
+ return boilerplate;
+}
+
+Handle<JSObject> CreateArrayLiteral(
+ Isolate* isolate,
+ Handle<ArrayBoilerplateDescription> array_boilerplate_description,
+ AllocationType allocation) {
+ ElementsKind constant_elements_kind =
+ array_boilerplate_description->elements_kind();
+
+ Handle<FixedArrayBase> constant_elements_values(
+ array_boilerplate_description->constant_elements(isolate), isolate);
+
+ // Create the JSArray.
+ Handle<FixedArrayBase> copied_elements_values;
+ if (IsDoubleElementsKind(constant_elements_kind)) {
+ copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
+ Handle<FixedDoubleArray>::cast(constant_elements_values));
+ } else {
+ DCHECK(IsSmiOrObjectElementsKind(constant_elements_kind));
+ const bool is_cow = (constant_elements_values->map(isolate) ==
+ ReadOnlyRoots(isolate).fixed_cow_array_map());
+ if (is_cow) {
+ copied_elements_values = constant_elements_values;
+ if (DEBUG_BOOL) {
Handle<FixedArray> fixed_array_values =
Handle<FixedArray>::cast(copied_elements_values);
for (int i = 0; i < fixed_array_values->length(); i++) {
DCHECK(!fixed_array_values->get(i).IsFixedArray());
}
-#endif
- } else {
- Handle<FixedArray> fixed_array_values =
- Handle<FixedArray>::cast(constant_elements_values);
- Handle<FixedArray> fixed_array_values_copy =
- isolate->factory()->CopyFixedArray(fixed_array_values);
- copied_elements_values = fixed_array_values_copy;
- FOR_WITH_HANDLE_SCOPE(
- isolate, int, i = 0, i, i < fixed_array_values->length(), i++, {
- Handle<Object> value(fixed_array_values->get(i), isolate);
-
- if (value->IsArrayBoilerplateDescription() ||
- value->IsObjectBoilerplateDescription()) {
- Handle<Object> result =
- InnerCreateBoilerplate(isolate, value, allocation);
- fixed_array_values_copy->set(i, *result);
- }
- });
+ }
+ } else {
+ Handle<FixedArray> fixed_array_values =
+ Handle<FixedArray>::cast(constant_elements_values);
+ Handle<FixedArray> fixed_array_values_copy =
+ isolate->factory()->CopyFixedArray(fixed_array_values);
+ copied_elements_values = fixed_array_values_copy;
+ for (int i = 0; i < fixed_array_values->length(); i++) {
+ Object value = fixed_array_values_copy->get(isolate, i);
+ HeapObject value_heap_object;
+ if (value.GetHeapObject(isolate, &value_heap_object)) {
+ if (value_heap_object.IsArrayBoilerplateDescription(isolate)) {
+ HandleScope sub_scope(isolate);
+ Handle<ArrayBoilerplateDescription> boilerplate(
+ ArrayBoilerplateDescription::cast(value_heap_object), isolate);
+ Handle<JSObject> result =
+ CreateArrayLiteral(isolate, boilerplate, allocation);
+ fixed_array_values_copy->set(i, *result);
+
+ } else if (value_heap_object.IsObjectBoilerplateDescription(
+ isolate)) {
+ HandleScope sub_scope(isolate);
+ Handle<ObjectBoilerplateDescription> boilerplate(
+ ObjectBoilerplateDescription::cast(value_heap_object), isolate);
+ Handle<JSObject> result = CreateObjectLiteral(
+ isolate, boilerplate, boilerplate->flags(), allocation);
+ fixed_array_values_copy->set(i, *result);
+ }
+ }
}
}
-
- return isolate->factory()->NewJSArrayWithElements(
- copied_elements_values, constant_elements_kind,
- copied_elements_values->length(), allocation);
- }
-};
-
-Handle<Object> InnerCreateBoilerplate(Isolate* isolate,
- Handle<Object> description,
- AllocationType allocation) {
- if (description->IsObjectBoilerplateDescription()) {
- Handle<ObjectBoilerplateDescription> object_boilerplate_description =
- Handle<ObjectBoilerplateDescription>::cast(description);
- return ObjectLiteralHelper::Create(isolate, object_boilerplate_description,
- object_boilerplate_description->flags(),
- allocation);
- } else {
- DCHECK(description->IsArrayBoilerplateDescription());
- Handle<ArrayBoilerplateDescription> array_boilerplate_description =
- Handle<ArrayBoilerplateDescription>::cast(description);
- return ArrayLiteralHelper::Create(
- isolate, array_boilerplate_description,
- array_boilerplate_description->elements_kind(), allocation);
}
+ return isolate->factory()->NewJSArrayWithElements(
+ copied_elements_values, constant_elements_kind,
+ copied_elements_values->length(), allocation);
}
inline DeepCopyHints DecodeCopyHints(int flags) {
@@ -556,6 +582,7 @@ MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
usage_context.ExitScope(site, boilerplate);
return copy;
}
+
} // namespace
RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 41f21865a6..eb21e0a9a4 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -5,8 +5,8 @@
#include "src/execution/arguments-inl.h"
#include "src/logging/counters.h"
#include "src/objects/js-promise.h"
-#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/source-text-module.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -33,14 +33,14 @@ RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(module_request, 0);
- Handle<Module> module(isolate->context().module(), isolate);
- return *Module::GetModuleNamespace(isolate, module, module_request);
+ Handle<SourceTextModule> module(isolate->context().module(), isolate);
+ return *SourceTextModule::GetModuleNamespace(isolate, module, module_request);
}
RUNTIME_FUNCTION(Runtime_GetImportMetaObject) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- Handle<Module> module(isolate->context().module(), isolate);
+ Handle<SourceTextModule> module(isolate->context().module(), isolate);
return *isolate->RunHostInitializeImportMetaObjectCallback(module);
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 8b94d83f31..25bd07b535 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
@@ -217,6 +217,8 @@ RUNTIME_FUNCTION(Runtime_ObjectGetOwnPropertyNames) {
Object::ToObject(isolate, object));
// Collect the own keys for the {receiver}.
+  // TODO(v8:9401): We should extend the fast path of KeyAccumulator::GetKeys
+  // so that it is also used when filter = SKIP_SYMBOLS.
Handle<FixedArray> keys;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, keys,
@@ -304,7 +306,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
}
Map map = js_obj->map();
- if (!map.has_hidden_prototype() &&
+ if (!map.IsJSGlobalProxyMap() &&
(key_is_array_index ? !map.has_indexed_interceptor()
: !map.has_named_interceptor())) {
return ReadOnlyRoots(isolate).false_value();
@@ -440,8 +442,8 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
// Conservative upper limit to prevent fuzz tests from going OOM.
if (properties > 100000) return isolate->ThrowIllegalOperation();
if (object->HasFastProperties() && !object->IsJSGlobalProxy()) {
- JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties,
- "OptimizeForAdding");
+ JSObject::NormalizeProperties(isolate, object, KEEP_INOBJECT_PROPERTIES,
+ properties, "OptimizeForAdding");
}
return *object;
}
@@ -502,6 +504,76 @@ RUNTIME_FUNCTION(Runtime_ObjectEntriesSkipFastPath) {
return *isolate->factory()->NewJSArrayWithElements(entries);
}
+RUNTIME_FUNCTION(Runtime_ObjectIsExtensible) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+
+ Maybe<bool> result =
+ object->IsJSReceiver()
+ ? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
+ : Just(false);
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_JSReceiverPreventExtensionsThrow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+
+ MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
+ kThrowOnError),
+ ReadOnlyRoots(isolate).exception());
+ return *object;
+}
+
+RUNTIME_FUNCTION(Runtime_JSReceiverPreventExtensionsDontThrow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+
+ Maybe<bool> result = JSReceiver::PreventExtensions(
+ Handle<JSReceiver>::cast(object), kDontThrow);
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_JSReceiverGetPrototypeOf) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSReceiver::GetPrototype(isolate, receiver));
+}
+
+RUNTIME_FUNCTION(Runtime_JSReceiverSetPrototypeOfThrow) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, proto, 1);
+
+ MAYBE_RETURN(JSReceiver::SetPrototype(object, proto, true, kThrowOnError),
+ ReadOnlyRoots(isolate).exception());
+
+ return *object;
+}
+
+RUNTIME_FUNCTION(Runtime_JSReceiverSetPrototypeOfDontThrow) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, proto, 1);
+
+ Maybe<bool> result =
+ JSReceiver::SetPrototype(object, proto, true, kDontThrow);
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -786,7 +858,7 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
// code where we can't handle lazy deopts for lack of a suitable bailout
// ID. So we just try migration and signal failure if necessary,
// which will also trigger a deopt.
- if (!JSObject::TryMigrateInstance(js_object)) return Smi::kZero;
+ if (!JSObject::TryMigrateInstance(isolate, js_object)) return Smi::kZero;
return *object;
}
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index dd07234a4a..2543b3f5d4 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -86,5 +86,17 @@ RUNTIME_FUNCTION(Runtime_CheckProxyHasTrapResult) {
return isolate->heap()->ToBoolean(result.FromJust());
}
+RUNTIME_FUNCTION(Runtime_CheckProxyDeleteTrapResult) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 1);
+
+ Maybe<bool> result = JSProxy::CheckDeleteTrap(isolate, name, target);
+ if (!result.IsJust()) return ReadOnlyRoots(isolate).exception();
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 85c9ebcb1b..76056a7823 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -4,15 +4,16 @@
#include <functional>
+#include "src/common/message-template.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/logging/counters.h"
#include "src/numbers/conversions-inl.h"
#include "src/objects/js-array-inl.h"
-#include "src/regexp/jsregexp-inl.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/regexp/regexp-utils.h"
+#include "src/regexp/regexp.h"
#include "src/runtime/runtime-utils.h"
#include "src/strings/string-builder-inl.h"
#include "src/strings/string-search.h"
@@ -594,8 +595,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalAtomRegExpWithString(
}
int32_t match_indices[] = {indices->back(), indices->back() + pattern_len};
- RegExpImpl::SetLastMatchInfo(isolate, last_match_info, subject, 0,
- match_indices);
+ RegExp::SetLastMatchInfo(isolate, last_match_info, subject, 0, match_indices);
TruncateRegexpIndicesList(isolate);
@@ -614,7 +614,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
JSRegExp::Type typeTag = regexp->TypeTag();
if (typeTag == JSRegExp::IRREGEXP) {
// Ensure the RegExp is compiled so we can access the capture-name map.
- if (RegExpImpl::IrregexpPrepare(isolate, regexp, subject) == -1) {
+ if (RegExp::IrregexpPrepare(isolate, regexp, subject) == -1) {
DCHECK(isolate->has_pending_exception());
return ReadOnlyRoots(isolate).exception();
}
@@ -638,7 +638,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
+ RegExpGlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
int32_t* current_match = global_cache.FetchNext();
@@ -679,8 +679,8 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
builder.AddSubjectSlice(prev, subject_length);
}
- RegExpImpl::SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
- global_cache.LastSuccessfulMatch());
+ RegExp::SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
+ global_cache.LastSuccessfulMatch());
RETURN_RESULT_OR_FAILURE(isolate, builder.ToString());
}
@@ -703,7 +703,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
+ RegExpGlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
int32_t* current_match = global_cache.FetchNext();
@@ -749,8 +749,8 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
- RegExpImpl::SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
- global_cache.LastSuccessfulMatch());
+ RegExp::SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
+ global_cache.LastSuccessfulMatch());
if (prev < subject_length) {
// Add substring subject[prev;length] to answer string.
@@ -877,8 +877,8 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
CHECK_LE(0, index);
CHECK_GE(subject->length(), index);
isolate->counters()->regexp_entry_runtime()->Increment();
- RETURN_RESULT_OR_FAILURE(isolate, RegExpImpl::Exec(isolate, regexp, subject,
- index, last_match_info));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, RegExp::Exec(isolate, regexp, subject, index, last_match_info));
}
namespace {
@@ -1108,14 +1108,14 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
isolate->factory()->CopyFixedArrayWithMap(
cached_fixed_array, isolate->factory()->fixed_array_map());
JSArray::SetContent(result_array, copied_fixed_array);
- RegExpImpl::SetLastMatchInfo(isolate, last_match_array, subject,
- capture_count, last_match);
+ RegExp::SetLastMatchInfo(isolate, last_match_array, subject,
+ capture_count, last_match);
DeleteArray(last_match);
return *result_array;
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
+ RegExpGlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
// Ensured in Runtime_RegExpExecMultiple.
@@ -1216,9 +1216,8 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
subject_length);
}
- RegExpImpl::SetLastMatchInfo(isolate, last_match_array, subject,
- capture_count,
- global_cache.LastSuccessfulMatch());
+ RegExp::SetLastMatchInfo(isolate, last_match_array, subject, capture_count,
+ global_cache.LastSuccessfulMatch());
if (subject_length > kMinLengthToCache) {
// Store the last successful match into the array for caching.
@@ -1282,10 +1281,10 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
// A lastIndex exceeding the string length always returns null (signalling
// failure) in RegExpBuiltinExec, thus we can skip the call.
if (last_index <= static_cast<uint32_t>(string->length())) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, match_indices_obj,
- RegExpImpl::Exec(isolate, regexp, string,
- last_index, last_match_info),
- String);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, match_indices_obj,
+ RegExp::Exec(isolate, regexp, string, last_index, last_match_info),
+ String);
}
if (match_indices_obj->IsNull(isolate)) {
@@ -1414,8 +1413,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
if (last_index <= static_cast<uint32_t>(subject->length())) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, match_indices_obj,
- RegExpImpl::Exec(isolate, regexp, subject, last_index,
- last_match_info));
+ RegExp::Exec(isolate, regexp, subject, last_index, last_match_info));
}
if (match_indices_obj->IsNull(isolate)) {
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 25d10e3395..f67b6922bf 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -6,11 +6,11 @@
#include "src/ast/scopes.h"
#include "src/builtins/accessors.h"
+#include "src/common/message-template.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
@@ -240,7 +240,7 @@ Object DeclareEvalHelper(Isolate* isolate, Handle<String> name,
Handle<Object> holder =
Context::Lookup(context, name, DONT_FOLLOW_CHAINS, &index, &attributes,
&init_flag, &mode);
- DCHECK(holder.is_null() || !holder->IsModule());
+ DCHECK(holder.is_null() || !holder->IsSourceTextModule());
DCHECK(!isolate->has_pending_exception());
Handle<JSObject> object;
@@ -715,7 +715,7 @@ RUNTIME_FUNCTION(Runtime_PushWithContext) {
RUNTIME_FUNCTION(Runtime_PushModuleContext) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Module, module, 0);
+ CONVERT_ARG_HANDLE_CHECKED(SourceTextModule, module, 0);
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
Handle<NativeContext> outer(NativeContext::cast(isolate->context()), isolate);
@@ -773,7 +773,7 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
// If the slot was found in a context or in module imports and exports it
// should be DONT_DELETE.
- if (holder->IsContext() || holder->IsModule()) {
+ if (holder->IsContext() || holder->IsSourceTextModule()) {
return ReadOnlyRoots(isolate).false_value();
}
@@ -801,10 +801,11 @@ MaybeHandle<Object> LoadLookupSlot(Isolate* isolate, Handle<String> name,
&attributes, &flag, &mode);
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
- if (!holder.is_null() && holder->IsModule()) {
+ if (!holder.is_null() && holder->IsSourceTextModule()) {
Handle<Object> receiver = isolate->factory()->undefined_value();
if (receiver_return) *receiver_return = receiver;
- return Module::LoadVariable(isolate, Handle<Module>::cast(holder), index);
+ return SourceTextModule::LoadVariable(
+ isolate, Handle<SourceTextModule>::cast(holder), index);
}
if (index != Context::kNotFound) {
DCHECK(holder->IsContext());
@@ -903,9 +904,10 @@ MaybeHandle<Object> StoreLookupSlot(
if (holder.is_null()) {
// In case of JSProxy, an exception might have been thrown.
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
- } else if (holder->IsModule()) {
+ } else if (holder->IsSourceTextModule()) {
if ((attributes & READ_ONLY) == 0) {
- Module::StoreVariable(Handle<Module>::cast(holder), index, value);
+ SourceTextModule::StoreVariable(Handle<SourceTextModule>::cast(holder),
+ index, value);
} else {
THROW_NEW_ERROR(
isolate, NewTypeError(MessageTemplate::kConstAssign, name), Object);
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 2e2918e47d..2ddd9d13f7 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -10,7 +10,6 @@
#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
-#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime-utils.h"
#include "src/strings/string-builder-inl.h"
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 85a50fca61..f0caaaa14c 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -11,6 +11,7 @@
#include "src/base/platform/mutex.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compiler.h"
+#include "src/codegen/pending-optimization-table.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/arguments-inl.h"
@@ -218,28 +219,6 @@ RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
isolate->concurrent_recompilation_enabled());
}
-namespace {
-
-void RemoveBytecodeFromPendingOptimizeTable(v8::internal::Isolate* isolate,
- Handle<JSFunction> function) {
- // TODO(mythria): Remove the check for undefined, once we fix all tests to
- // add PrepareForOptimization when using OptimizeFunctionOnNextCall.
- if (isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) {
- return;
- }
-
- Handle<ObjectHashTable> table =
- handle(ObjectHashTable::cast(
- isolate->heap()->pending_optimize_for_test_bytecode()),
- isolate);
- bool was_present;
- table = table->Remove(isolate, table, handle(function->shared(), isolate),
- &was_present);
- isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
-}
-
-} // namespace
-
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
@@ -271,9 +250,9 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
return ReadOnlyRoots(isolate).undefined_value();
}
- if (function->shared().optimization_disabled() &&
- function->shared().disable_optimization_reason() ==
- BailoutReason::kNeverOptimize) {
+ if (!FLAG_opt || (function->shared().optimization_disabled() &&
+ function->shared().disable_optimization_reason() ==
+ BailoutReason::kNeverOptimize)) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -281,20 +260,15 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
return ReadOnlyRoots(isolate).undefined_value();
}
- // Check we called PrepareFunctionForOptimization and hold the bytecode
- // array to prevent it from getting flushed.
- // TODO(mythria): Enable this check once we add PrepareForOptimization in all
- // tests before calling OptimizeFunctionOnNextCall.
- // CHECK(!ObjectHashTable::cast(
- // isolate->heap()->pending_optimize_for_test_bytecode())
- // ->Lookup(handle(function->shared(), isolate))
- // ->IsTheHole());
+ if (FLAG_testing_d8_test_runner) {
+ PendingOptimizationTable::MarkedForOptimization(isolate, function);
+ }
if (function->HasOptimizedCode()) {
DCHECK(function->IsOptimized() || function->ChecksOptimizationMarker());
- // If function is already optimized, remove the bytecode array from the
- // pending optimize for test table and return.
- RemoveBytecodeFromPendingOptimizeTable(isolate, function);
+ if (FLAG_testing_d8_test_runner) {
+ PendingOptimizationTable::FunctionWasOptimized(isolate, function);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -359,8 +333,10 @@ bool EnsureFeedbackVector(Handle<JSFunction> function) {
RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
+ if (!args[0].IsJSFunction()) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
EnsureFeedbackVector(function);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -368,6 +344,9 @@ RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
+ if (!args[0].IsJSFunction()) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (!EnsureFeedbackVector(function)) {
@@ -389,16 +368,9 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
// Hold onto the bytecode array between marking and optimization to ensure
// it's not flushed.
- Handle<ObjectHashTable> table =
- isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()
- ? ObjectHashTable::New(isolate, 1)
- : handle(ObjectHashTable::cast(
- isolate->heap()->pending_optimize_for_test_bytecode()),
- isolate);
- table = ObjectHashTable::Put(
- table, handle(function->shared(), isolate),
- handle(function->shared().GetBytecodeArray(), isolate));
- isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+ if (FLAG_testing_d8_test_runner) {
+ PendingOptimizationTable::PreparedForOptimization(isolate, function);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -418,26 +390,23 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
if (!it.done()) function = handle(it.frame()->function(), isolate);
if (function.is_null()) return ReadOnlyRoots(isolate).undefined_value();
- if (function->shared().optimization_disabled() &&
- function->shared().disable_optimization_reason() ==
- BailoutReason::kNeverOptimize) {
+ if (!FLAG_opt || (function->shared().optimization_disabled() &&
+ function->shared().disable_optimization_reason() ==
+ BailoutReason::kNeverOptimize)) {
return ReadOnlyRoots(isolate).undefined_value();
}
- // Check we called PrepareFunctionForOptimization and hold the bytecode
- // array to prevent it from getting flushed.
- // TODO(mythria): Enable this check once we add PrepareForOptimization in all
- // tests before calling OptimizeOsr.
- // CHECK(!ObjectHashTable::cast(
- // isolate->heap()->pending_optimize_for_test_bytecode())
- // ->Lookup(handle(function->shared(), isolate))
- // ->IsTheHole());
+ if (FLAG_testing_d8_test_runner) {
+ PendingOptimizationTable::MarkedForOptimization(isolate, function);
+ }
if (function->HasOptimizedCode()) {
DCHECK(function->IsOptimized() || function->ChecksOptimizationMarker());
// If function is already optimized, remove the bytecode array from the
// pending optimize for test table and return.
- RemoveBytecodeFromPendingOptimizeTable(isolate, function);
+ if (FLAG_testing_d8_test_runner) {
+ PendingOptimizationTable::FunctionWasOptimized(isolate, function);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -593,14 +562,11 @@ RUNTIME_FUNCTION(Runtime_GetUndetectable) {
}
static void call_as_function(const v8::FunctionCallbackInfo<v8::Value>& args) {
- double v1 = args[0]
- ->NumberValue(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToChecked();
- double v2 = args[1]
- ->NumberValue(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToChecked();
- args.GetReturnValue().Set(
- v8::Number::New(v8::Isolate::GetCurrent(), v1 - v2));
+ double v1 =
+ args[0]->NumberValue(args.GetIsolate()->GetCurrentContext()).ToChecked();
+ double v2 =
+ args[1]->NumberValue(args.GetIsolate()->GetCurrentContext()).ToChecked();
+ args.GetReturnValue().Set(v8::Number::New(args.GetIsolate(), v1 - v2));
}
// Returns a callable object. The object returns the difference of its two
@@ -624,6 +590,9 @@ RUNTIME_FUNCTION(Runtime_GetCallable) {
RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
+ if (!args[0].IsJSFunction()) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
function->ClearTypeFeedbackInfo();
return ReadOnlyRoots(isolate).undefined_value();
@@ -832,7 +801,6 @@ RUNTIME_FUNCTION(Runtime_Abort) {
UNREACHABLE();
}
-
RUNTIME_FUNCTION(Runtime_AbortJS) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -847,6 +815,16 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
UNREACHABLE();
}
+RUNTIME_FUNCTION(Runtime_AbortCSAAssert) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
+ base::OS::PrintError("abort: CSA_ASSERT failed: %s\n",
+ message->ToCString().get());
+ isolate->PrintStack(stderr);
+ base::OS::Abort();
+ UNREACHABLE();
+}
RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
HandleScope scope(isolate);
@@ -1153,6 +1131,19 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
return *module_object;
}
+// Create a new Module object using the same NativeModule.
+RUNTIME_FUNCTION(Runtime_CloneWasmModule) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_object, 0);
+
+ Handle<WasmModuleObject> new_module_object =
+ wasm::WasmEngine::GetWasmEngine()->ImportNativeModule(
+ isolate, module_object->shared_native_module());
+
+ return *new_module_object;
+}
+
RUNTIME_FUNCTION(Runtime_HeapObjectVerify) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 1736ee3939..7fab051cbf 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/common/message-template.h"
#include "src/execution/arguments-inl.h"
-#include "src/execution/message-template.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 288bfa1141..65acb296cc 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
+#include "src/common/message-template.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frame-constants.h"
-#include "src/execution/message-template.h"
+#include "src/execution/frames.h"
#include "src/heap/factory.h"
#include "src/logging/counters.h"
#include "src/numbers/conversions.h"
@@ -62,7 +63,7 @@ Object ThrowWasmError(Isolate* isolate, MessageTemplate message) {
}
} // namespace
-RUNTIME_FUNCTION(Runtime_WasmIsValidAnyFuncValue) {
+RUNTIME_FUNCTION(Runtime_WasmIsValidFuncRefValue) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, function, 0);
@@ -209,12 +210,13 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
// methods that could trigger a GC are being called.
Address arg_buf_ptr = arg_buffer;
for (int i = 0; i < num_params; ++i) {
-#define CASE_ARG_TYPE(type, ctype) \
- case wasm::type: \
- DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetParam(i)), \
- sizeof(ctype)); \
- wasm_args[i] = wasm::WasmValue(ReadUnalignedValue<ctype>(arg_buf_ptr)); \
- arg_buf_ptr += sizeof(ctype); \
+#define CASE_ARG_TYPE(type, ctype) \
+ case wasm::type: \
+ DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetParam(i)), \
+ sizeof(ctype)); \
+ wasm_args[i] = \
+ wasm::WasmValue(base::ReadUnalignedValue<ctype>(arg_buf_ptr)); \
+ arg_buf_ptr += sizeof(ctype); \
break;
switch (sig->GetParam(i)) {
CASE_ARG_TYPE(kWasmI32, uint32_t)
@@ -223,11 +225,12 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
CASE_ARG_TYPE(kWasmF64, double)
#undef CASE_ARG_TYPE
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef: {
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef: {
DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetParam(i)),
kSystemPointerSize);
- Handle<Object> ref(ReadUnalignedValue<Object>(arg_buf_ptr), isolate);
+ Handle<Object> ref(base::ReadUnalignedValue<Object>(arg_buf_ptr),
+ isolate);
wasm_args[i] = wasm::WasmValue(ref);
arg_buf_ptr += kSystemPointerSize;
break;
@@ -259,12 +262,12 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
// also un-boxes reference types from handles into raw pointers.
arg_buf_ptr = arg_buffer;
for (int i = 0; i < num_returns; ++i) {
-#define CASE_RET_TYPE(type, ctype) \
- case wasm::type: \
- DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetReturn(i)), \
- sizeof(ctype)); \
- WriteUnalignedValue<ctype>(arg_buf_ptr, wasm_rets[i].to<ctype>()); \
- arg_buf_ptr += sizeof(ctype); \
+#define CASE_RET_TYPE(type, ctype) \
+ case wasm::type: \
+ DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetReturn(i)), \
+ sizeof(ctype)); \
+ base::WriteUnalignedValue<ctype>(arg_buf_ptr, wasm_rets[i].to<ctype>()); \
+ arg_buf_ptr += sizeof(ctype); \
break;
switch (sig->GetReturn(i)) {
CASE_RET_TYPE(kWasmI32, uint32_t)
@@ -273,11 +276,12 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
CASE_RET_TYPE(kWasmF64, double)
#undef CASE_RET_TYPE
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef: {
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef: {
DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetReturn(i)),
kSystemPointerSize);
- WriteUnalignedValue<Object>(arg_buf_ptr, *wasm_rets[i].to_anyref());
+ base::WriteUnalignedValue<Object>(arg_buf_ptr,
+ *wasm_rets[i].to_anyref());
arg_buf_ptr += kSystemPointerSize;
break;
}
@@ -476,116 +480,6 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableSet) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_WasmIndirectCallCheckSignatureAndGetTargetInstance) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- auto instance =
- Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_index, 0);
- CONVERT_UINT32_ARG_CHECKED(entry_index, 1);
- CONVERT_UINT32_ARG_CHECKED(sig_index, 2);
- DCHECK(isolate->context().is_null());
- isolate->set_context(instance->native_context());
-
- DCHECK_LT(table_index, instance->tables().length());
- auto table_obj = handle(
- WasmTableObject::cast(instance->tables().get(table_index)), isolate);
-
- // This check is already done in generated code.
- DCHECK(WasmTableObject::IsInBounds(isolate, table_obj, entry_index));
-
- bool is_valid;
- bool is_null;
- MaybeHandle<WasmInstanceObject> maybe_target_instance;
- int function_index;
- WasmTableObject::GetFunctionTableEntry(
- isolate, table_obj, entry_index, &is_valid, &is_null,
- &maybe_target_instance, &function_index);
-
- CHECK(is_valid);
- if (is_null) {
- // We throw a signature mismatch trap to be in sync with the generated
- // code. There we do a signature check instead of a null-check. Trap
- // reasons are not defined in the spec. Otherwise, a null-check is
- // performed before a signature, according to the spec.
- return ThrowWasmError(isolate, MessageTemplate::kWasmTrapFuncSigMismatch);
- }
-
- // Now we do the signature check.
- Handle<WasmInstanceObject> target_instance =
- maybe_target_instance.ToHandleChecked();
-
- const wasm::WasmModule* target_module =
- target_instance->module_object().native_module()->module();
-
- wasm::FunctionSig* target_sig = target_module->functions[function_index].sig;
-
- auto target_sig_id = instance->module()->signature_map.Find(*target_sig);
- uint32_t expected_sig_id = instance->module()->signature_ids[sig_index];
-
- if (expected_sig_id != static_cast<uint32_t>(target_sig_id)) {
- return ThrowWasmError(isolate, MessageTemplate::kWasmTrapFuncSigMismatch);
- }
-
- if (function_index <
- static_cast<int>(target_instance->module()->num_imported_functions)) {
- // The function in the target instance was imported. Use its imports table,
- // which contains a tuple needed by the import wrapper.
- ImportedFunctionEntry entry(target_instance, function_index);
- return entry.object_ref();
- }
- return *target_instance;
-}
-
-RUNTIME_FUNCTION(Runtime_WasmIndirectCallGetTargetAddress) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- auto instance =
- Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_index, 0);
- CONVERT_UINT32_ARG_CHECKED(entry_index, 1);
-
- DCHECK_LT(table_index, instance->tables().length());
- auto table_obj = handle(
- WasmTableObject::cast(instance->tables().get(table_index)), isolate);
-
- DCHECK(WasmTableObject::IsInBounds(isolate, table_obj, entry_index));
-
- bool is_valid;
- bool is_null;
- MaybeHandle<WasmInstanceObject> maybe_target_instance;
- int function_index;
- WasmTableObject::GetFunctionTableEntry(
- isolate, table_obj, entry_index, &is_valid, &is_null,
- &maybe_target_instance, &function_index);
-
- CHECK(is_valid);
- // The null-check should already have been done in
- // Runtime_WasmIndirectCallCheckSignatureAndGetTargetInstance. That runtime
- // function should always be called first.
- CHECK(!is_null);
-
- Handle<WasmInstanceObject> target_instance =
- maybe_target_instance.ToHandleChecked();
-
- Address call_target = 0;
- if (function_index <
- static_cast<int>(target_instance->module()->num_imported_functions)) {
- // The function in the target instance was imported. Use its imports table,
- // which contains a tuple needed by the import wrapper.
- ImportedFunctionEntry entry(target_instance, function_index);
- call_target = entry.target();
- } else {
- // The function in the target instance was not imported.
- call_target = target_instance->GetCallTarget(function_index);
- }
-
- // The return value is an address and not a SMI. However, the address is
- // always aligned, and a SMI uses the same space as {Address}.
- CHECK(HAS_SMI_TAG(call_target));
- return Smi(call_target);
-}
-
RUNTIME_FUNCTION(Runtime_WasmTableInit) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
@@ -609,16 +503,18 @@ RUNTIME_FUNCTION(Runtime_WasmTableInit) {
RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
+ DCHECK(isolate->context().is_null());
+ isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
auto instance =
Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_src_index, 0);
- CONVERT_UINT32_ARG_CHECKED(table_dst_index, 1);
+ CONVERT_UINT32_ARG_CHECKED(table_dst_index, 0);
+ CONVERT_UINT32_ARG_CHECKED(table_src_index, 1);
CONVERT_UINT32_ARG_CHECKED(dst, 2);
CONVERT_UINT32_ARG_CHECKED(src, 3);
CONVERT_UINT32_ARG_CHECKED(count, 4);
bool oob = !WasmInstanceObject::CopyTableEntries(
- isolate, instance, table_src_index, table_dst_index, dst, src, count);
+ isolate, instance, table_dst_index, table_src_index, dst, src, count);
if (oob) return ThrowTableOutOfBounds(isolate, instance);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-weak-refs.cc b/deps/v8/src/runtime/runtime-weak-refs.cc
index fbb5b42344..2720176c30 100644
--- a/deps/v8/src/runtime/runtime-weak-refs.cc
+++ b/deps/v8/src/runtime/runtime-weak-refs.cc
@@ -20,7 +20,8 @@ RUNTIME_FUNCTION(Runtime_FinalizationGroupCleanupJob) {
CONVERT_ARG_HANDLE_CHECKED(JSFinalizationGroup, finalization_group, 0);
finalization_group->set_scheduled_for_cleanup(false);
- JSFinalizationGroup::Cleanup(finalization_group, isolate);
+ Handle<Object> cleanup(finalization_group->cleanup(), isolate);
+ JSFinalizationGroup::Cleanup(isolate, finalization_group, cleanup);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 773a5065e2..92ca9f3142 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -115,8 +115,9 @@ namespace internal {
F(ClearStepping, 0, 1) \
F(CollectGarbage, 1, 1) \
F(DebugAsyncFunctionEntered, 1, 1) \
- F(DebugAsyncFunctionFinished, 2, 1) \
F(DebugAsyncFunctionSuspended, 1, 1) \
+ F(DebugAsyncFunctionResumed, 1, 1) \
+ F(DebugAsyncFunctionFinished, 2, 1) \
F(DebugBreakAtEntry, 1, 1) \
F(DebugCollectCoverage, 0, 1) \
F(DebugGetLoadedScriptIds, 0, 1) \
@@ -202,7 +203,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
F(AccessCheck, 1, 1) \
F(AllocateByteArray, 1, 1) \
- F(AllocateInYoungGeneration, 1, 1) \
+ F(AllocateInYoungGeneration, 2, 1) \
F(AllocateInOldGeneration, 2, 1) \
F(AllocateSeqOneByteString, 1, 1) \
F(AllocateSeqTwoByteString, 1, 1) \
@@ -299,14 +300,20 @@ namespace internal {
I(HasProperty, 2, 1) \
F(InternalSetPrototype, 2, 1) \
I(IsJSReceiver, 1, 1) \
+ F(JSReceiverPreventExtensionsDontThrow, 1, 1) \
+ F(JSReceiverPreventExtensionsThrow, 1, 1) \
+ F(JSReceiverGetPrototypeOf, 1, 1) \
+ F(JSReceiverSetPrototypeOfDontThrow, 2, 1) \
+ F(JSReceiverSetPrototypeOfThrow, 2, 1) \
F(NewObject, 2, 1) \
F(ObjectCreate, 2, 1) \
F(ObjectEntries, 1, 1) \
F(ObjectEntriesSkipFastPath, 1, 1) \
- F(ObjectHasOwnProperty, 2, 1) \
- F(ObjectKeys, 1, 1) \
F(ObjectGetOwnPropertyNames, 1, 1) \
F(ObjectGetOwnPropertyNamesTryFast, 1, 1) \
+ F(ObjectHasOwnProperty, 2, 1) \
+ F(ObjectIsExtensible, 1, 1) \
+ F(ObjectKeys, 1, 1) \
F(ObjectValues, 1, 1) \
F(ObjectValuesSkipFastPath, 1, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
@@ -355,6 +362,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_PROXY(F, I) \
F(CheckProxyGetSetTrapResult, 2, 1) \
F(CheckProxyHasTrapResult, 2, 1) \
+ F(CheckProxyDeleteTrapResult, 2, 1) \
F(GetPropertyWithReceiver, 3, 1) \
F(SetPropertyWithReceiver, 4, 1)
@@ -425,23 +433,27 @@ namespace internal {
F(SymbolIsPrivate, 1, 1)
#define FOR_EACH_INTRINSIC_TEST(F, I) \
- F(ClearMegamorphicStubCache, 0, 1) \
F(Abort, 1, 1) \
F(AbortJS, 1, 1) \
+ F(AbortCSAAssert, 1, 1) \
+ F(ArraySpeciesProtector, 0, 1) \
F(ClearFunctionFeedback, 1, 1) \
+ F(ClearMegamorphicStubCache, 0, 1) \
+ F(CloneWasmModule, 1, 1) \
F(CompleteInobjectSlackTracking, 1, 1) \
F(ConstructConsString, 2, 1) \
- F(ConstructSlicedString, 2, 1) \
F(ConstructDouble, 2, 1) \
+ F(ConstructSlicedString, 2, 1) \
F(DebugPrint, 1, 1) \
F(DebugTrace, 0, 1) \
F(DebugTrackRetainingPath, -1, 1) \
F(DeoptimizeFunction, 1, 1) \
- I(DeoptimizeNow, 0, 1) \
F(DeserializeWasmModule, 2, 1) \
F(DisallowCodegenFromStrings, 1, 1) \
F(DisallowWasmCodegen, 1, 1) \
F(DisassembleFunction, 1, 1) \
+ F(EnableCodeLoggingForTesting, 0, 1) \
+ F(EnsureFeedbackVectorForFunction, 1, 1) \
F(FreezeWasmLazyCompilation, 1, 1) \
F(GetCallable, 0, 1) \
F(GetInitializerFunction, 1, 1) \
@@ -452,7 +464,6 @@ namespace internal {
F(GetWasmRecoveredTrapCount, 0, 1) \
F(GlobalPrint, 1, 1) \
F(HasDictionaryElements, 1, 1) \
- F(HasPackedElements, 1, 1) \
F(HasDoubleElements, 1, 1) \
F(HasElementsInALargeObjectSpace, 1, 1) \
F(HasFastElements, 1, 1) \
@@ -470,6 +481,7 @@ namespace internal {
F(HasFixedUint8Elements, 1, 1) \
F(HasHoleyElements, 1, 1) \
F(HasObjectElements, 1, 1) \
+ F(HasPackedElements, 1, 1) \
F(HasSloppyArgumentsElements, 1, 1) \
F(HasSmiElements, 1, 1) \
F(HasSmiOrObjectElements, 1, 1) \
@@ -479,16 +491,15 @@ namespace internal {
F(InYoungGeneration, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
- F(WasmTierUpFunction, 2, 1) \
F(IsLiftoffFunction, 1, 1) \
+ F(IsThreadInWasm, 0, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
- F(IsThreadInWasm, 0, 1) \
+ F(MapIteratorProtector, 0, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
F(OptimizeOsr, -1, 1) \
- F(EnsureFeedbackVectorForFunction, 1, 1) \
F(PrepareFunctionForOptimization, 1, 1) \
F(PrintWithNameForAssert, 2, 1) \
F(RedirectToWasmInterpreter, 2, 1) \
@@ -496,22 +507,21 @@ namespace internal {
F(SerializeWasmModule, 1, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
F(SetForceSlowPath, 1, 1) \
+ F(SetIteratorProtector, 0, 1) \
F(SetWasmCompileControls, 2, 1) \
F(SetWasmInstantiateControls, 0, 1) \
- F(ArraySpeciesProtector, 0, 1) \
- F(MapIteratorProtector, 0, 1) \
- F(SetIteratorProtector, 0, 1) \
+ F(SetWasmThreadsEnabled, 1, 1) \
F(StringIteratorProtector, 0, 1) \
F(SystemBreak, 0, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
+ F(TurbofanStaticAssert, 1, 1) \
F(UnblockConcurrentRecompilation, 0, 1) \
F(WasmGetNumberOfInstances, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
+ F(WasmTierUpFunction, 2, 1) \
F(WasmTraceMemory, 1, 1) \
- F(SetWasmThreadsEnabled, 1, 1) \
- F(TurbofanStaticAssert, 1, 1) \
- F(EnableCodeLoggingForTesting, 0, 1)
+ I(DeoptimizeNow, 0, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
F(ArrayBufferDetach, 1, 1) \
@@ -520,29 +530,27 @@ namespace internal {
F(TypedArraySet, 2, 1) \
F(TypedArraySortFast, 1, 1)
-#define FOR_EACH_INTRINSIC_WASM(F, I) \
- F(ThrowWasmError, 1, 1) \
- F(ThrowWasmStackOverflow, 0, 1) \
- F(WasmI32AtomicWait, 4, 1) \
- F(WasmI64AtomicWait, 5, 1) \
- F(WasmAtomicNotify, 3, 1) \
- F(WasmExceptionGetValues, 1, 1) \
- F(WasmExceptionGetTag, 1, 1) \
- F(WasmMemoryGrow, 2, 1) \
- F(WasmRunInterpreter, 2, 1) \
- F(WasmStackGuard, 0, 1) \
- F(WasmThrowCreate, 2, 1) \
- F(WasmThrowTypeError, 0, 1) \
- F(WasmRefFunc, 1, 1) \
- F(WasmFunctionTableGet, 3, 1) \
- F(WasmFunctionTableSet, 4, 1) \
- F(WasmTableInit, 5, 1) \
- F(WasmTableCopy, 5, 1) \
- F(WasmTableGrow, 3, 1) \
- F(WasmTableFill, 4, 1) \
- F(WasmIndirectCallCheckSignatureAndGetTargetInstance, 3, 1) \
- F(WasmIndirectCallGetTargetAddress, 2, 1) \
- F(WasmIsValidAnyFuncValue, 1, 1) \
+#define FOR_EACH_INTRINSIC_WASM(F, I) \
+ F(ThrowWasmError, 1, 1) \
+ F(ThrowWasmStackOverflow, 0, 1) \
+ F(WasmI32AtomicWait, 4, 1) \
+ F(WasmI64AtomicWait, 5, 1) \
+ F(WasmAtomicNotify, 3, 1) \
+ F(WasmExceptionGetValues, 1, 1) \
+ F(WasmExceptionGetTag, 1, 1) \
+ F(WasmMemoryGrow, 2, 1) \
+ F(WasmRunInterpreter, 2, 1) \
+ F(WasmStackGuard, 0, 1) \
+ F(WasmThrowCreate, 2, 1) \
+ F(WasmThrowTypeError, 0, 1) \
+ F(WasmRefFunc, 1, 1) \
+ F(WasmFunctionTableGet, 3, 1) \
+ F(WasmFunctionTableSet, 4, 1) \
+ F(WasmTableInit, 5, 1) \
+ F(WasmTableCopy, 5, 1) \
+ F(WasmTableGrow, 3, 1) \
+ F(WasmTableFill, 4, 1) \
+ F(WasmIsValidFuncRefValue, 1, 1) \
F(WasmCompileLazy, 2, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR_IMPL(F, I) \
@@ -557,8 +565,6 @@ namespace internal {
F(KeyedStoreIC_Miss, 5, 1) \
F(StoreInArrayLiteralIC_Miss, 5, 1) \
F(KeyedStoreIC_Slow, 3, 1) \
- F(LoadAccessorProperty, 4, 1) \
- F(LoadCallbackProperty, 4, 1) \
F(LoadElementWithInterceptor, 2, 1) \
F(LoadGlobalIC_Miss, 4, 1) \
F(LoadGlobalIC_Slow, 3, 1) \
@@ -765,6 +771,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, Runtime::FunctionId);
class AllocateDoubleAlignFlag : public BitField<bool, 0, 1> {};
+class AllowLargeObjectAllocationFlag : public BitField<bool, 1, 1> {};
+
class DeclareGlobalsEvalFlag : public BitField<bool, 0, 1> {};
// A set of bits returned by Runtime_GetOptimizationStatus.
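A minimal illustrative sketch (not part of the upstream patch), assuming the usual encode/decode members of V8's BitField template: the two flag classes above partition a single integer (bit 0 for double alignment, bit 1 for large-object allocation), presumably the extra flags argument that AllocateInYoungGeneration now receives.

// Pack both allocation flags into one untagged integer.
uint32_t EncodeAllocationFlags(bool double_align, bool allow_large_object) {
  return AllocateDoubleAlignFlag::encode(double_align) |
         AllowLargeObjectAllocationFlag::encode(allow_large_object);
}

// Unpack them again on the runtime side.
void DecodeAllocationFlags(uint32_t flags, bool* double_align,
                           bool* allow_large_object) {
  *double_align = AllocateDoubleAlignFlag::decode(flags);
  *allow_large_object = AllowLargeObjectAllocationFlag::decode(flags);
}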
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index f55d5b57e5..2dec1c33db 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
delphick@chromium.org
jgruber@chromium.org
petermarshall@chromium.org
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index b4e75a6c20..d7e208eac5 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -96,22 +96,22 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
}
bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
- PagedSpace* read_only_space = isolate()->heap()->read_only_space();
- if (!read_only_space->Contains(obj)) return false;
+ if (!ReadOnlyHeap::Contains(obj)) return false;
- // For objects in RO_SPACE, never serialize the object, but instead create a
- // back reference that encodes the page number as the chunk_index and the
- // offset within the page as the chunk_offset.
+ // For objects on the read-only heap, never serialize the object, but instead
+ // create a back reference that encodes the page number as the chunk_index and
+ // the offset within the page as the chunk_offset.
Address address = obj.address();
Page* page = Page::FromAddress(address);
uint32_t chunk_index = 0;
+ ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
for (Page* p : *read_only_space) {
if (p == page) break;
++chunk_index;
}
uint32_t chunk_offset = static_cast<uint32_t>(page->Offset(address));
- SerializerReference back_reference =
- SerializerReference::BackReference(RO_SPACE, chunk_index, chunk_offset);
+ SerializerReference back_reference = SerializerReference::BackReference(
+ SnapshotSpace::kReadOnlyHeap, chunk_index, chunk_offset);
reference_map()->Add(reinterpret_cast<void*>(obj.ptr()), back_reference);
CHECK(SerializeBackReference(obj));
return true;
diff --git a/deps/v8/src/snapshot/deserializer-allocator.cc b/deps/v8/src/snapshot/deserializer-allocator.cc
index 4fb600d1dd..0b96a5a050 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/deserializer-allocator.cc
@@ -20,8 +20,9 @@ namespace internal {
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
-Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
- if (space == LO_SPACE) {
+Address DeserializerAllocator::AllocateRaw(SnapshotSpace space, int size) {
+ const int space_number = static_cast<int>(space);
+ if (space == SnapshotSpace::kLargeObject) {
AlwaysAllocateScope scope(heap_);
// Note that we currently do not support deserialization of large code
// objects.
@@ -30,21 +31,21 @@ Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
HeapObject obj = result.ToObjectChecked();
deserialized_large_objects_.push_back(obj);
return obj.address();
- } else if (space == MAP_SPACE) {
+ } else if (space == SnapshotSpace::kMap) {
DCHECK_EQ(Map::kSize, size);
return allocated_maps_[next_map_index_++];
} else {
- DCHECK_LT(space, kNumberOfPreallocatedSpaces);
- Address address = high_water_[space];
+ DCHECK(IsPreAllocatedSpace(space));
+ Address address = high_water_[space_number];
DCHECK_NE(address, kNullAddress);
- high_water_[space] += size;
+ high_water_[space_number] += size;
#ifdef DEBUG
// Assert that the current reserved chunk is still big enough.
- const Heap::Reservation& reservation = reservations_[space];
- int chunk_index = current_chunk_[space];
- DCHECK_LE(high_water_[space], reservation[chunk_index].end);
+ const Heap::Reservation& reservation = reservations_[space_number];
+ int chunk_index = current_chunk_[space_number];
+ DCHECK_LE(high_water_[space_number], reservation[chunk_index].end);
#endif
- if (space == CODE_SPACE)
+ if (space == SnapshotSpace::kCode)
MemoryChunk::FromAddress(address)
->GetCodeObjectRegistry()
->RegisterNewlyAllocatedCodeObject(address);
@@ -52,7 +53,7 @@ Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
}
}
-Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
+Address DeserializerAllocator::Allocate(SnapshotSpace space, int size) {
Address address;
HeapObject obj;
@@ -75,16 +76,17 @@ Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
}
}
-void DeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
- DCHECK_LT(space, kNumberOfPreallocatedSpaces);
- uint32_t chunk_index = current_chunk_[space];
- const Heap::Reservation& reservation = reservations_[space];
+void DeserializerAllocator::MoveToNextChunk(SnapshotSpace space) {
+ DCHECK(IsPreAllocatedSpace(space));
+ const int space_number = static_cast<int>(space);
+ uint32_t chunk_index = current_chunk_[space_number];
+ const Heap::Reservation& reservation = reservations_[space_number];
// Make sure the current chunk is indeed exhausted.
- CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
+ CHECK_EQ(reservation[chunk_index].end, high_water_[space_number]);
// Move to next reserved chunk.
- chunk_index = ++current_chunk_[space];
+ chunk_index = ++current_chunk_[space_number];
CHECK_LT(chunk_index, reservation.size());
- high_water_[space] = reservation[chunk_index].start;
+ high_water_[space_number] = reservation[chunk_index].start;
}
HeapObject DeserializerAllocator::GetMap(uint32_t index) {
@@ -97,12 +99,14 @@ HeapObject DeserializerAllocator::GetLargeObject(uint32_t index) {
return deserialized_large_objects_[index];
}
-HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
+HeapObject DeserializerAllocator::GetObject(SnapshotSpace space,
uint32_t chunk_index,
uint32_t chunk_offset) {
- DCHECK_LT(space, kNumberOfPreallocatedSpaces);
- DCHECK_LE(chunk_index, current_chunk_[space]);
- Address address = reservations_[space][chunk_index].start + chunk_offset;
+ DCHECK(IsPreAllocatedSpace(space));
+ const int space_number = static_cast<int>(space);
+ DCHECK_LE(chunk_index, current_chunk_[space_number]);
+ Address address =
+ reservations_[space_number][chunk_index].start + chunk_offset;
if (next_alignment_ != kWordAligned) {
int padding = Heap::GetFillToAlign(address, next_alignment_);
next_alignment_ = kWordAligned;
@@ -114,8 +118,8 @@ HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
void DeserializerAllocator::DecodeReservation(
const std::vector<SerializedData::Reservation>& res) {
- DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
- int current_space = FIRST_SPACE;
+ DCHECK_EQ(0, reservations_[0].size());
+ int current_space = 0;
for (auto& r : res) {
reservations_[current_space].push_back(
{r.chunk_size(), kNullAddress, kNullAddress});
@@ -127,11 +131,13 @@ void DeserializerAllocator::DecodeReservation(
bool DeserializerAllocator::ReserveSpace() {
#ifdef DEBUG
- for (int i = FIRST_SPACE; i < kNumberOfSpaces; ++i) {
+ for (int i = 0; i < kNumberOfSpaces; ++i) {
DCHECK_GT(reservations_[i].size(), 0);
}
#endif // DEBUG
DCHECK(allocated_maps_.empty());
+ // TODO(v8:7464): Allocate using the off-heap ReadOnlySpace here once
+ // implemented.
if (!heap_->ReserveSpace(reservations_, &allocated_maps_)) {
return false;
}
diff --git a/deps/v8/src/snapshot/deserializer-allocator.h b/deps/v8/src/snapshot/deserializer-allocator.h
index 27cacc79d5..18f9363cdf 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.h
+++ b/deps/v8/src/snapshot/deserializer-allocator.h
@@ -25,9 +25,9 @@ class DeserializerAllocator final {
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
- Address Allocate(AllocationSpace space, int size);
+ Address Allocate(SnapshotSpace space, int size);
- void MoveToNextChunk(AllocationSpace space);
+ void MoveToNextChunk(SnapshotSpace space);
void SetAlignment(AllocationAlignment alignment) {
DCHECK_EQ(kWordAligned, next_alignment_);
DCHECK_LE(kWordAligned, alignment);
@@ -51,7 +51,7 @@ class DeserializerAllocator final {
HeapObject GetMap(uint32_t index);
HeapObject GetLargeObject(uint32_t index);
- HeapObject GetObject(AllocationSpace space, uint32_t chunk_index,
+ HeapObject GetObject(SnapshotSpace space, uint32_t chunk_index,
uint32_t chunk_offset);
// ------- Reservation Methods -------
@@ -69,13 +69,13 @@ class DeserializerAllocator final {
private:
// Raw allocation without considering alignment.
- Address AllocateRaw(AllocationSpace space, int size);
+ Address AllocateRaw(SnapshotSpace space, int size);
private:
static constexpr int kNumberOfPreallocatedSpaces =
- SerializerDeserializer::kNumberOfPreallocatedSpaces;
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
static constexpr int kNumberOfSpaces =
- SerializerDeserializer::kNumberOfSpaces;
+ static_cast<int>(SnapshotSpace::kNumberOfSpaces);
// The address of the next object that will be allocated in each space.
// Each space has a number of chunks reserved by the GC, with each chunk
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 1fd590db26..25e32e2cc0 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -90,10 +90,10 @@ Deserializer::~Deserializer() {
// process. It is also called on the body of each function.
void Deserializer::VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) {
- // We are reading to a location outside of JS heap, so pass NEW_SPACE to
- // avoid triggering write barriers.
- ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end), NEW_SPACE,
- kNullAddress);
+ // We are reading to a location outside of JS heap, so pass kNew to avoid
+ // triggering write barriers.
+ ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end),
+ SnapshotSpace::kNew, kNullAddress);
}
void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
@@ -112,9 +112,10 @@ void Deserializer::DeserializeDeferredObjects() {
break;
}
default: {
- int space = code & kSpaceMask;
- DCHECK_LE(space, kNumberOfSpaces);
- DCHECK_EQ(code - space, kNewObject);
+ const int space_number = code & kSpaceMask;
+ DCHECK_LE(space_number, kNumberOfSpaces);
+ DCHECK_EQ(code - space_number, kNewObject);
+ SnapshotSpace space = static_cast<SnapshotSpace>(space_number);
HeapObject object = GetBackReferencedObject(space);
int size = source_.GetInt() << kTaggedSizeLog2;
Address obj_address = object.address();
@@ -201,7 +202,8 @@ String ForwardStringIfExists(Isolate* isolate, StringTableInsertionKey* key) {
} // namespace
-HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
+HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
+ SnapshotSpace space) {
if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
if (obj.IsString()) {
// Uninitialize hash field as we need to recompute the hash.
@@ -209,7 +211,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
string.set_hash_field(String::kEmptyHashField);
// Rehash strings before read-only space is sealed. Strings outside
// read-only space are rehashed lazily. (e.g. when rehashing dictionaries)
- if (space == RO_SPACE) {
+ if (space == SnapshotSpace::kReadOnlyHeap) {
to_rehash_.push_back(obj);
}
} else if (obj.NeedsRehashing()) {
@@ -249,7 +251,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
// We flush all code pages after deserializing the startup snapshot.
// Hence we only remember each individual code object when deserializing
// user code.
- if (deserializing_user_code() || space == LO_SPACE) {
+ if (deserializing_user_code() || space == SnapshotSpace::kLargeObject) {
new_code_objects_.push_back(Code::cast(obj));
}
} else if (FLAG_trace_maps && obj.IsMap()) {
@@ -326,16 +328,16 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
return obj;
}
-HeapObject Deserializer::GetBackReferencedObject(int space) {
+HeapObject Deserializer::GetBackReferencedObject(SnapshotSpace space) {
HeapObject obj;
switch (space) {
- case LO_SPACE:
+ case SnapshotSpace::kLargeObject:
obj = allocator()->GetLargeObject(source_.GetInt());
break;
- case MAP_SPACE:
+ case SnapshotSpace::kMap:
obj = allocator()->GetMap(source_.GetInt());
break;
- case RO_SPACE: {
+ case SnapshotSpace::kReadOnlyHeap: {
uint32_t chunk_index = source_.GetInt();
uint32_t chunk_offset = source_.GetInt();
if (isolate()->heap()->deserialization_complete()) {
@@ -347,16 +349,14 @@ HeapObject Deserializer::GetBackReferencedObject(int space) {
Address address = page->OffsetToAddress(chunk_offset);
obj = HeapObject::FromAddress(address);
} else {
- obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
- chunk_index, chunk_offset);
+ obj = allocator()->GetObject(space, chunk_index, chunk_offset);
}
break;
}
default: {
uint32_t chunk_index = source_.GetInt();
uint32_t chunk_offset = source_.GetInt();
- obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
- chunk_index, chunk_offset);
+ obj = allocator()->GetObject(space, chunk_index, chunk_offset);
break;
}
}
@@ -372,49 +372,48 @@ HeapObject Deserializer::GetBackReferencedObject(int space) {
HeapObject Deserializer::ReadObject() {
MaybeObject object;
- // We are reading to a location outside of JS heap, so pass NEW_SPACE to
- // avoid triggering write barriers.
+ // We are reading to a location outside of JS heap, so pass kNew to avoid
+ // triggering write barriers.
bool filled =
ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
- NEW_SPACE, kNullAddress);
+ SnapshotSpace::kNew, kNullAddress);
CHECK(filled);
return object.GetHeapObjectAssumeStrong();
}
-HeapObject Deserializer::ReadObject(int space_number) {
+HeapObject Deserializer::ReadObject(SnapshotSpace space) {
const int size = source_.GetInt() << kObjectAlignmentBits;
- Address address =
- allocator()->Allocate(static_cast<AllocationSpace>(space_number), size);
+ Address address = allocator()->Allocate(space, size);
HeapObject obj = HeapObject::FromAddress(address);
isolate_->heap()->OnAllocationEvent(obj, size);
MaybeObjectSlot current(address);
MaybeObjectSlot limit(address + size);
- if (ReadData(current, limit, space_number, address)) {
+ if (ReadData(current, limit, space, address)) {
// Only post process if object content has not been deferred.
- obj = PostProcessNewObject(obj, space_number);
+ obj = PostProcessNewObject(obj, space);
}
#ifdef DEBUG
if (obj.IsCode()) {
- DCHECK(space_number == CODE_SPACE || space_number == CODE_LO_SPACE);
+ DCHECK_EQ(space, SnapshotSpace::kCode);
} else {
- DCHECK(space_number != CODE_SPACE && space_number != CODE_LO_SPACE);
+ DCHECK_NE(space, SnapshotSpace::kCode);
}
#endif // DEBUG
return obj;
}
-void Deserializer::ReadCodeObjectBody(int space_number,
+void Deserializer::ReadCodeObjectBody(SnapshotSpace space,
Address code_object_address) {
// At this point the code object is already allocated, its map field is
// initialized and its raw data fields and code stream are also read.
// Now we read the rest of code header's fields.
MaybeObjectSlot current(code_object_address + HeapObject::kHeaderSize);
MaybeObjectSlot limit(code_object_address + Code::kDataStart);
- bool filled = ReadData(current, limit, space_number, code_object_address);
+ bool filled = ReadData(current, limit, space, code_object_address);
CHECK(filled);
  // Now iterate RelocInfos the same way it was done by the serializer and
@@ -517,21 +516,22 @@ static void NoExternalReferencesCallback() {
}
template <typename TSlot>
-bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
+bool Deserializer::ReadData(TSlot current, TSlot limit,
+ SnapshotSpace source_space,
Address current_object_address) {
Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
// but that may change.
- bool write_barrier_needed =
- (current_object_address != kNullAddress && source_space != NEW_SPACE &&
- source_space != CODE_SPACE);
+ bool write_barrier_needed = (current_object_address != kNullAddress &&
+ source_space != SnapshotSpace::kNew &&
+ source_space != SnapshotSpace::kCode);
while (current < limit) {
byte data = source_.Get();
switch (data) {
-#define CASE_STATEMENT(bytecode, space_number) \
- case bytecode + space_number: \
- STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
+#define CASE_STATEMENT(bytecode, snapshot_space) \
+ case bytecode + static_cast<int>(snapshot_space): \
+ STATIC_ASSERT((static_cast<int>(snapshot_space) & ~kSpaceMask) == 0);
#define CASE_BODY(bytecode, space_number_if_any) \
current = ReadDataCase<TSlot, bytecode, space_number_if_any>( \
@@ -541,18 +541,18 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with fall-through cases
// and one body.
-#define ALL_SPACES(bytecode) \
- CASE_STATEMENT(bytecode, NEW_SPACE) \
- CASE_BODY(bytecode, NEW_SPACE) \
- CASE_STATEMENT(bytecode, OLD_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(bytecode, CODE_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(bytecode, MAP_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(bytecode, LO_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(bytecode, RO_SPACE) \
+#define ALL_SPACES(bytecode) \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kNew) \
+ CASE_BODY(bytecode, SnapshotSpace::kNew) \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kOld) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kCode) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kMap) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kLargeObject) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kReadOnlyHeap) \
CASE_BODY(bytecode, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
@@ -579,16 +579,16 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
ALL_SPACES(kBackref)
// Find an object in the roots array and write a pointer to it to the
// current object.
- SINGLE_CASE(kRootArray, RO_SPACE)
+ SINGLE_CASE(kRootArray, SnapshotSpace::kReadOnlyHeap)
// Find an object in the partial snapshots cache and write a pointer to it
// to the current object.
- SINGLE_CASE(kPartialSnapshotCache, RO_SPACE)
+ SINGLE_CASE(kPartialSnapshotCache, SnapshotSpace::kReadOnlyHeap)
// Find an object in the partial snapshots cache and write a pointer to it
// to the current object.
- SINGLE_CASE(kReadOnlyObjectCache, RO_SPACE)
+ SINGLE_CASE(kReadOnlyObjectCache, SnapshotSpace::kReadOnlyHeap)
// Find an object in the attached references and write a pointer to it to
// the current object.
- SINGLE_CASE(kAttachedReference, RO_SPACE)
+ SINGLE_CASE(kAttachedReference, SnapshotSpace::kReadOnlyHeap)
#undef CASE_STATEMENT
#undef CASE_BODY
@@ -614,7 +614,7 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
case kNextChunk: {
int space = source_.Get();
- allocator()->MoveToNextChunk(static_cast<AllocationSpace>(space));
+ allocator()->MoveToNextChunk(static_cast<SnapshotSpace>(space));
break;
}
@@ -791,13 +791,15 @@ Address Deserializer::ReadExternalReferenceCase() {
}
template <typename TSlot, SerializerDeserializer::Bytecode bytecode,
- int space_number_if_any>
+ SnapshotSpace space_number_if_any>
TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
Address current_object_address, byte data,
bool write_barrier_needed) {
bool emit_write_barrier = false;
- int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
- : space_number_if_any;
+ SnapshotSpace space = static_cast<SnapshotSpace>(
+ space_number_if_any == kAnyOldSpace
+ ? static_cast<SnapshotSpace>(data & kSpaceMask)
+ : space_number_if_any);
HeapObject heap_object;
HeapObjectReferenceType reference_type =
allocator()->GetAndClearNextReferenceIsWeak()
@@ -805,11 +807,11 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
: HeapObjectReferenceType::STRONG;
if (bytecode == kNewObject) {
- heap_object = ReadObject(space_number);
- emit_write_barrier = (space_number == NEW_SPACE);
+ heap_object = ReadObject(space);
+ emit_write_barrier = (space == SnapshotSpace::kNew);
} else if (bytecode == kBackref) {
- heap_object = GetBackReferencedObject(space_number);
- emit_write_barrier = (space_number == NEW_SPACE);
+ heap_object = GetBackReferencedObject(space);
+ emit_write_barrier = (space == SnapshotSpace::kNew);
} else if (bytecode == kRootArray) {
int id = source_.GetInt();
RootIndex root_index = static_cast<RootIndex>(id);
@@ -819,8 +821,7 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
} else if (bytecode == kReadOnlyObjectCache) {
int cache_index = source_.GetInt();
heap_object = HeapObject::cast(
- isolate->heap()->read_only_heap()->cached_read_only_object(
- cache_index));
+ isolate->read_only_heap()->cached_read_only_object(cache_index));
DCHECK(!Heap::InYoungGeneration(heap_object));
emit_write_barrier = false;
} else if (bytecode == kPartialSnapshotCache) {
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 6e3f497d38..8dce1b3f3f 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -5,6 +5,7 @@
#ifndef V8_SNAPSHOT_DESERIALIZER_H_
#define V8_SNAPSHOT_DESERIALIZER_H_
+#include <utility>
#include <vector>
#include "src/objects/allocation-site.h"
@@ -39,6 +40,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
~Deserializer() override;
void SetRehashability(bool v) { can_rehash_ = v; }
+ std::pair<uint32_t, uint32_t> GetChecksum() const {
+ return source_.GetChecksum();
+ }
protected:
// Create a deserializer from a snapshot byte source.
@@ -65,7 +69,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
// This returns the address of an object that has been described in the
// snapshot by chunk index and offset.
- HeapObject GetBackReferencedObject(int space);
+ HeapObject GetBackReferencedObject(SnapshotSpace space);
// Add an object to back an attached reference. The order to add objects must
// mirror the order they are added in the serializer.
@@ -122,11 +126,13 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
// object, i.e. if we are writing a series of tagged values that are not on
// the heap. Return false if the object content has been deferred.
template <typename TSlot>
- bool ReadData(TSlot start, TSlot end, int space, Address object_address);
+ bool ReadData(TSlot start, TSlot end, SnapshotSpace space,
+ Address object_address);
// A helper function for ReadData, templatized on the bytecode for efficiency.
// Returns the new value of {current}.
- template <typename TSlot, Bytecode bytecode, int space_number_if_any>
+ template <typename TSlot, Bytecode bytecode,
+ SnapshotSpace space_number_if_any>
inline TSlot ReadDataCase(Isolate* isolate, TSlot current,
Address current_object_address, byte data,
bool write_barrier_needed);
@@ -135,8 +141,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
inline Address ReadExternalReferenceCase();
HeapObject ReadObject();
- HeapObject ReadObject(int space_number);
- void ReadCodeObjectBody(int space_number, Address code_object_address);
+ HeapObject ReadObject(SnapshotSpace space_number);
+ void ReadCodeObjectBody(SnapshotSpace space_number,
+ Address code_object_address);
public:
void VisitCodeTarget(Code host, RelocInfo* rinfo);
@@ -151,7 +158,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
TSlot ReadRepeatedObject(TSlot current, int repeat_count);
// Special handling for serialized code like hooking up internalized strings.
- HeapObject PostProcessNewObject(HeapObject obj, int space);
+ HeapObject PostProcessNewObject(HeapObject obj, SnapshotSpace space);
// Objects from the attached object descriptions in the serialized user code.
std::vector<Handle<HeapObject>> attached_objects_;
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index 4cee1ac131..f4183b4b87 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -127,7 +127,15 @@ void PlatformEmbeddedFileWriterGeneric::DeclareExternalFilename(
fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
}
-void PlatformEmbeddedFileWriterGeneric::FileEpilogue() {}
+void PlatformEmbeddedFileWriterGeneric::FileEpilogue() {
+  // Omitting this section can imply an executable stack, which usually
+  // triggers a linker warning or error. C++ compilers add the section
+  // automatically, but when compiling assembly the .note.GNU-stack section
+  // has to be inserted manually.
+ // Additional documentation:
+ // https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
+ fprintf(fp_, ".section .note.GNU-stack,\"\",%%progbits\n");
+}
int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective(
DataDirective directive) {
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index d0758cb42c..69457e11a5 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -11,7 +11,6 @@
#if defined(V8_OS_WIN_X64)
#include "src/builtins/builtins.h"
#include "src/diagnostics/unwinding-info-win64.h"
-#include "src/objects/objects-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
#endif
@@ -570,11 +569,7 @@ void PlatformEmbeddedFileWriterWin::DeclareExternalFilename(
// Replace any Windows style paths (backslashes) with forward
// slashes.
std::string fixed_filename(filename);
- for (auto& c : fixed_filename) {
- if (c == '\\') {
- c = '/';
- }
- }
+ std::replace(fixed_filename.begin(), fixed_filename.end(), '\\', '/');
fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
}
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 6bf198230f..819f7009c2 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -259,12 +259,14 @@ int main(int argc, char** argv) {
// Set code range such that relative jumps for builtins to
// builtin calls in the snapshot are possible.
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- size_t code_range_size =
+ size_t code_range_size_mb =
i::kMaximalCodeRangeSize == 0
? i::kMaxPCRelativeCodeRangeInMB
: std::min(i::kMaximalCodeRangeSize / i::MB,
i::kMaxPCRelativeCodeRangeInMB);
- i_isolate->heap()->ConfigureHeap(0, 0, code_range_size);
+ v8::ResourceConstraints constraints;
+ constraints.set_code_range_size_in_bytes(code_range_size_mb * i::MB);
+ i_isolate->heap()->ConfigureHeap(constraints);
// The isolate contains data from builtin compilation that needs
// to be written out if builtins are embedded.
i_isolate->RegisterEmbeddedFileWriter(&embedded_writer);
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index f294d33b5c..ea2136007b 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -16,7 +16,6 @@ namespace internal {
enum NativeType {
EXTRAS,
- TEST
};
// Extra handling for V8_EXPORT_PRIVATE in combination with USING_V8_SHARED
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index 9b56f129df..22854bf14a 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -73,7 +73,8 @@ void PartialDeserializer::DeserializeEmbedderFields(
int space = code & kSpaceMask;
DCHECK_LE(space, kNumberOfSpaces);
DCHECK_EQ(code - space, kNewObject);
- Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
+ Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(
+ static_cast<SnapshotSpace>(space))),
isolate());
int index = source()->GetInt();
int size = source()->GetInt();
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 036f0a0414..7b4ffbb2bf 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -214,7 +214,7 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
if (DataIsEmpty(data)) continue;
// Restore original values from cleared fields.
EmbedderDataSlot(js_obj, i).store_raw(original_embedder_values[i], no_gc);
- embedder_fields_sink_.Put(kNewObject + reference.space(),
+ embedder_fields_sink_.Put(kNewObject + static_cast<int>(reference.space()),
"embedder field holder");
embedder_fields_sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
embedder_fields_sink_.PutInt(reference.chunk_offset(),
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index 576e644846..5ac5a6444a 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -21,7 +21,7 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
V8::FatalProcessOutOfMemory(isolate, "ReadOnlyDeserializer");
}
- ReadOnlyHeap* ro_heap = isolate->heap()->read_only_heap();
+ ReadOnlyHeap* ro_heap = isolate->read_only_heap();
// No active threads.
DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index f4b45a15cc..4ddaf37773 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -67,7 +67,7 @@ void ReadOnlySerializer::FinalizeSerialization() {
#ifdef DEBUG
// Check that every object on read-only heap is reachable (and was
// serialized).
- ReadOnlyHeapIterator iterator(isolate()->heap()->read_only_heap());
+ ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
CHECK(serialized_objects_.count(object));
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
index e7c44236ac..c81e9a1e21 100644
--- a/deps/v8/src/snapshot/references.h
+++ b/deps/v8/src/snapshot/references.h
@@ -12,6 +12,30 @@
namespace v8 {
namespace internal {
+// TODO(goszczycki): Move this somewhere every file in src/snapshot can use it.
+// The spaces supported by the serializer. Spaces after LO_SPACE (NEW_LO_SPACE
+// and CODE_LO_SPACE) are not supported.
+enum class SnapshotSpace {
+ kReadOnlyHeap = RO_SPACE,
+ kNew = NEW_SPACE,
+ kOld = OLD_SPACE,
+ kCode = CODE_SPACE,
+ kMap = MAP_SPACE,
+ kLargeObject = LO_SPACE,
+ kNumberOfPreallocatedSpaces = kCode + 1,
+ kNumberOfSpaces = kLargeObject + 1,
+ kSpecialValueSpace = kNumberOfSpaces,
+ // Number of spaces which should be allocated by the heap. Eventually
+ // kReadOnlyHeap will move to the end of this enum and this will be equal to
+ // it.
+ kNumberOfHeapSpaces = kNumberOfSpaces,
+};
+
+constexpr bool IsPreAllocatedSpace(SnapshotSpace space) {
+ return static_cast<int>(space) <
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
+}
+
class SerializerReference {
private:
enum SpecialValueType {
@@ -21,33 +45,32 @@ class SerializerReference {
kBuiltinReference,
};
- static const int kSpecialValueSpace = LAST_SPACE + 1;
- STATIC_ASSERT(kSpecialValueSpace < (1 << kSpaceTagSize));
+ STATIC_ASSERT(static_cast<int>(SnapshotSpace::kSpecialValueSpace) <
+ (1 << kSpaceTagSize));
SerializerReference(SpecialValueType type, uint32_t value)
- : bitfield_(SpaceBits::encode(kSpecialValueSpace) |
+ : bitfield_(SpaceBits::encode(SnapshotSpace::kSpecialValueSpace) |
SpecialValueTypeBits::encode(type)),
value_(value) {}
public:
SerializerReference() : SerializerReference(kInvalidValue, 0) {}
- SerializerReference(uint32_t space, uint32_t chunk_index,
+ SerializerReference(SnapshotSpace space, uint32_t chunk_index,
uint32_t chunk_offset)
: bitfield_(SpaceBits::encode(space) |
ChunkIndexBits::encode(chunk_index)),
value_(chunk_offset) {}
- static SerializerReference BackReference(AllocationSpace space,
+ static SerializerReference BackReference(SnapshotSpace space,
uint32_t chunk_index,
uint32_t chunk_offset) {
DCHECK(IsAligned(chunk_offset, kObjectAlignment));
- DCHECK_LT(space, LO_SPACE);
return SerializerReference(space, chunk_index, chunk_offset);
}
static SerializerReference MapReference(uint32_t index) {
- return SerializerReference(MAP_SPACE, 0, index);
+ return SerializerReference(SnapshotSpace::kMap, 0, index);
}
static SerializerReference OffHeapBackingStoreReference(uint32_t index) {
@@ -55,7 +78,7 @@ class SerializerReference {
}
static SerializerReference LargeObjectReference(uint32_t index) {
- return SerializerReference(LO_SPACE, 0, index);
+ return SerializerReference(SnapshotSpace::kLargeObject, 0, index);
}
static SerializerReference AttachedReference(uint32_t index) {
@@ -67,17 +90,17 @@ class SerializerReference {
}
bool is_valid() const {
- return SpaceBits::decode(bitfield_) != kSpecialValueSpace ||
+ return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace ||
SpecialValueTypeBits::decode(bitfield_) != kInvalidValue;
}
bool is_back_reference() const {
- return SpaceBits::decode(bitfield_) <= LAST_SPACE;
+ return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace;
}
- AllocationSpace space() const {
+ SnapshotSpace space() const {
DCHECK(is_back_reference());
- return static_cast<AllocationSpace>(SpaceBits::decode(bitfield_));
+ return SpaceBits::decode(bitfield_);
}
uint32_t chunk_offset() const {
@@ -86,17 +109,17 @@ class SerializerReference {
}
uint32_t chunk_index() const {
- DCHECK(space() != MAP_SPACE && space() != LO_SPACE);
+ DCHECK(IsPreAllocatedSpace(space()));
return ChunkIndexBits::decode(bitfield_);
}
uint32_t map_index() const {
- DCHECK_EQ(MAP_SPACE, SpaceBits::decode(bitfield_));
+ DCHECK_EQ(SnapshotSpace::kMap, SpaceBits::decode(bitfield_));
return value_;
}
bool is_off_heap_backing_store_reference() const {
- return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+ return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
SpecialValueTypeBits::decode(bitfield_) == kOffHeapBackingStore;
}
@@ -106,12 +129,12 @@ class SerializerReference {
}
uint32_t large_object_index() const {
- DCHECK_EQ(LO_SPACE, SpaceBits::decode(bitfield_));
+ DCHECK_EQ(SnapshotSpace::kLargeObject, SpaceBits::decode(bitfield_));
return value_;
}
bool is_attached_reference() const {
- return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+ return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
SpecialValueTypeBits::decode(bitfield_) == kAttachedReference;
}
@@ -121,7 +144,7 @@ class SerializerReference {
}
bool is_builtin_reference() const {
- return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+ return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
SpecialValueTypeBits::decode(bitfield_) == kBuiltinReference;
}
@@ -131,7 +154,7 @@ class SerializerReference {
}
private:
- class SpaceBits : public BitField<int, 0, kSpaceTagSize> {};
+ class SpaceBits : public BitField<SnapshotSpace, 0, kSpaceTagSize> {};
class ChunkIndexBits
: public BitField<uint32_t, SpaceBits::kNext, 32 - kSpaceTagSize> {};
class SpecialValueTypeBits
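A minimal illustrative sketch (not part of the upstream patch): because each SnapshotSpace enumerator above reuses the numeric value of the corresponding AllocationSpace constant, converting between the two types is a plain static_cast, which is what the serializer code below relies on. ToAllocationSpace is a hypothetical helper name used only for illustration.

// These hold by construction of the SnapshotSpace enum above.
static_assert(static_cast<int>(SnapshotSpace::kReadOnlyHeap) == RO_SPACE, "");
static_assert(static_cast<int>(SnapshotSpace::kCode) == CODE_SPACE, "");

// Convert a snapshot space back to the matching heap space.
inline AllocationSpace ToAllocationSpace(SnapshotSpace space) {
  return static_cast<AllocationSpace>(space);
}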
diff --git a/deps/v8/src/snapshot/serializer-allocator.cc b/deps/v8/src/snapshot/serializer-allocator.cc
index 763244137f..a709715bdd 100644
--- a/deps/v8/src/snapshot/serializer-allocator.cc
+++ b/deps/v8/src/snapshot/serializer-allocator.cc
@@ -23,42 +23,42 @@ void SerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
custom_chunk_size_ = chunk_size;
}
-static uint32_t PageSizeOfSpace(int space) {
+static uint32_t PageSizeOfSpace(SnapshotSpace space) {
return static_cast<uint32_t>(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
static_cast<AllocationSpace>(space)));
}
-uint32_t SerializerAllocator::TargetChunkSize(int space) {
+uint32_t SerializerAllocator::TargetChunkSize(SnapshotSpace space) {
if (custom_chunk_size_ == 0) return PageSizeOfSpace(space);
DCHECK_LE(custom_chunk_size_, PageSizeOfSpace(space));
return custom_chunk_size_;
}
-SerializerReference SerializerAllocator::Allocate(AllocationSpace space,
+SerializerReference SerializerAllocator::Allocate(SnapshotSpace space,
uint32_t size) {
- DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
+ const int space_number = static_cast<int>(space);
+ DCHECK(IsPreAllocatedSpace(space));
DCHECK(size > 0 && size <= PageSizeOfSpace(space));
// Maps are allocated through AllocateMap.
- DCHECK_NE(MAP_SPACE, space);
- // We tenure large object allocations.
- DCHECK_NE(NEW_LO_SPACE, space);
+ DCHECK_NE(SnapshotSpace::kMap, space);
- uint32_t old_chunk_size = pending_chunk_[space];
+ uint32_t old_chunk_size = pending_chunk_[space_number];
uint32_t new_chunk_size = old_chunk_size + size;
// Start a new chunk if the new size exceeds the target chunk size.
// We may exceed the target chunk size if the single object size does.
if (new_chunk_size > TargetChunkSize(space) && old_chunk_size != 0) {
serializer_->PutNextChunk(space);
- completed_chunks_[space].push_back(pending_chunk_[space]);
- pending_chunk_[space] = 0;
+ completed_chunks_[space_number].push_back(pending_chunk_[space_number]);
+ pending_chunk_[space_number] = 0;
new_chunk_size = size;
}
- uint32_t offset = pending_chunk_[space];
- pending_chunk_[space] = new_chunk_size;
+ uint32_t offset = pending_chunk_[space_number];
+ pending_chunk_[space_number] = new_chunk_size;
return SerializerReference::BackReference(
- space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
+ space, static_cast<uint32_t>(completed_chunks_[space_number].size()),
+ offset);
}
SerializerReference SerializerAllocator::AllocateMap() {
@@ -83,23 +83,25 @@ SerializerReference SerializerAllocator::AllocateOffHeapBackingStore() {
bool SerializerAllocator::BackReferenceIsAlreadyAllocated(
SerializerReference reference) const {
DCHECK(reference.is_back_reference());
- AllocationSpace space = reference.space();
- if (space == LO_SPACE) {
+ SnapshotSpace space = reference.space();
+ if (space == SnapshotSpace::kLargeObject) {
return reference.large_object_index() < seen_large_objects_index_;
- } else if (space == MAP_SPACE) {
+ } else if (space == SnapshotSpace::kMap) {
return reference.map_index() < num_maps_;
- } else if (space == RO_SPACE &&
+ } else if (space == SnapshotSpace::kReadOnlyHeap &&
serializer_->isolate()->heap()->deserialization_complete()) {
// If not deserializing the isolate itself, then we create BackReferences
- // for all RO_SPACE objects without ever allocating.
+ // for all read-only heap objects without ever allocating.
return true;
} else {
+ const int space_number = static_cast<int>(space);
size_t chunk_index = reference.chunk_index();
- if (chunk_index == completed_chunks_[space].size()) {
- return reference.chunk_offset() < pending_chunk_[space];
+ if (chunk_index == completed_chunks_[space_number].size()) {
+ return reference.chunk_offset() < pending_chunk_[space_number];
} else {
- return chunk_index < completed_chunks_[space].size() &&
- reference.chunk_offset() < completed_chunks_[space][chunk_index];
+ return chunk_index < completed_chunks_[space_number].size() &&
+ reference.chunk_offset() <
+ completed_chunks_[space_number][chunk_index];
}
}
}
@@ -109,7 +111,7 @@ std::vector<SerializedData::Reservation>
SerializerAllocator::EncodeReservations() const {
std::vector<SerializedData::Reservation> out;
- for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
out.emplace_back(completed_chunks_[i][j]);
}
@@ -120,11 +122,14 @@ SerializerAllocator::EncodeReservations() const {
out.back().mark_as_last();
}
- STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
+ STATIC_ASSERT(SnapshotSpace::kMap ==
+ SnapshotSpace::kNumberOfPreallocatedSpaces);
out.emplace_back(num_maps_ * Map::kSize);
out.back().mark_as_last();
- STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
+ STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
+ 1);
out.emplace_back(large_objects_total_size_);
out.back().mark_as_last();
@@ -136,21 +141,24 @@ void SerializerAllocator::OutputStatistics() {
PrintF(" Spaces (bytes):\n");
- for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
+ for (int space = 0; space < kNumberOfSpaces; space++) {
PrintF("%16s", Heap::GetSpaceName(static_cast<AllocationSpace>(space)));
}
PrintF("\n");
- for (int space = FIRST_SPACE; space < kNumberOfPreallocatedSpaces; space++) {
+ for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
size_t s = pending_chunk_[space];
for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
PrintF("%16zu", s);
}
- STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
+ STATIC_ASSERT(SnapshotSpace::kMap ==
+ SnapshotSpace::kNumberOfPreallocatedSpaces);
PrintF("%16d", num_maps_ * Map::kSize);
- STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
+ STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
+ 1);
PrintF("%16d\n", large_objects_total_size_);
}
diff --git a/deps/v8/src/snapshot/serializer-allocator.h b/deps/v8/src/snapshot/serializer-allocator.h
index 0ca968f0fe..0d15c5a91b 100644
--- a/deps/v8/src/snapshot/serializer-allocator.h
+++ b/deps/v8/src/snapshot/serializer-allocator.h
@@ -16,7 +16,7 @@ class SerializerAllocator final {
public:
explicit SerializerAllocator(Serializer* serializer);
- SerializerReference Allocate(AllocationSpace space, uint32_t size);
+ SerializerReference Allocate(SnapshotSpace space, uint32_t size);
SerializerReference AllocateMap();
SerializerReference AllocateLargeObject(uint32_t size);
SerializerReference AllocateOffHeapBackingStore();
@@ -35,12 +35,12 @@ class SerializerAllocator final {
private:
// We try to not exceed this size for every chunk. We will not succeed for
// larger objects though.
- uint32_t TargetChunkSize(int space);
+ uint32_t TargetChunkSize(SnapshotSpace space);
static constexpr int kNumberOfPreallocatedSpaces =
- SerializerDeserializer::kNumberOfPreallocatedSpaces;
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
static constexpr int kNumberOfSpaces =
- SerializerDeserializer::kNumberOfSpaces;
+ static_cast<int>(SnapshotSpace::kNumberOfSpaces);
// Objects from the same space are put into chunks for bulk-allocation
// when deserializing. We have to make sure that each chunk fits into a
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 30da8db662..c845a089a3 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -6,9 +6,9 @@
#define V8_SNAPSHOT_SERIALIZER_COMMON_H_
#include "src/base/bits.h"
+#include "src/base/memory.h"
#include "src/codegen/external-reference-table.h"
#include "src/common/globals.h"
-#include "src/common/v8memory.h"
#include "src/objects/visitors.h"
#include "src/sanitizer/msan.h"
#include "src/snapshot/references.h"
@@ -102,19 +102,6 @@ class SerializerDeserializer : public RootVisitor {
public:
static void Iterate(Isolate* isolate, RootVisitor* visitor);
- // No reservation for large object space necessary.
- // We also handle map space differenly.
- STATIC_ASSERT(MAP_SPACE == CODE_SPACE + 1);
-
- // We do not support young generation large objects and large code objects.
- STATIC_ASSERT(LAST_SPACE == NEW_LO_SPACE);
- STATIC_ASSERT(LAST_SPACE - 2 == LO_SPACE);
- static const int kNumberOfPreallocatedSpaces = CODE_SPACE + 1;
-
- // The number of spaces supported by the serializer. Spaces after LO_SPACE
- // (NEW_LO_SPACE and CODE_LO_SPACE) are not supported.
- static const int kNumberOfSpaces = LO_SPACE + 1;
-
protected:
static bool CanBeDeferred(HeapObject o);
@@ -123,6 +110,12 @@ class SerializerDeserializer : public RootVisitor {
void RestoreExternalReferenceRedirectors(
const std::vector<CallHandlerInfo>& call_handler_infos);
+ static const int kNumberOfPreallocatedSpaces =
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
+
+ static const int kNumberOfSpaces =
+ static_cast<int>(SnapshotSpace::kNumberOfSpaces);
+
// clang-format off
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
V(0x06) V(0x07) V(0x0e) V(0x0f) \
@@ -259,7 +252,7 @@ class SerializerDeserializer : public RootVisitor {
//
// Some other constants.
//
- static const int kAnyOldSpace = -1;
+ static const SnapshotSpace kAnyOldSpace = SnapshotSpace::kNumberOfSpaces;
// Sentinel after a new object to indicate that double alignment is needed.
static const int kDoubleAlignmentSentinel = 0;
@@ -344,12 +337,13 @@ class SerializedData {
protected:
void SetHeaderValue(uint32_t offset, uint32_t value) {
- WriteLittleEndianValue(reinterpret_cast<Address>(data_) + offset, value);
+ base::WriteLittleEndianValue(reinterpret_cast<Address>(data_) + offset,
+ value);
}
uint32_t GetHeaderValue(uint32_t offset) const {
- return ReadLittleEndianValue<uint32_t>(reinterpret_cast<Address>(data_) +
- offset);
+ return base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(data_) + offset);
}
void AllocateData(uint32_t size);
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index b2dd6a33e7..5b68aaa87b 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -27,7 +27,7 @@ Serializer::Serializer(Isolate* isolate)
allocator_(this) {
#ifdef OBJECT_PRINT
if (FLAG_serialization_statistics) {
- for (int space = 0; space < LAST_SPACE; ++space) {
+ for (int space = 0; space < kNumberOfSpaces; ++space) {
instance_type_count_[space] = NewArray<int>(kInstanceTypes);
instance_type_size_[space] = NewArray<size_t>(kInstanceTypes);
for (int i = 0; i < kInstanceTypes; i++) {
@@ -36,7 +36,7 @@ Serializer::Serializer(Isolate* isolate)
}
}
} else {
- for (int space = 0; space < LAST_SPACE; ++space) {
+ for (int space = 0; space < kNumberOfSpaces; ++space) {
instance_type_count_[space] = nullptr;
instance_type_size_[space] = nullptr;
}
@@ -47,7 +47,7 @@ Serializer::Serializer(Isolate* isolate)
Serializer::~Serializer() {
if (code_address_map_ != nullptr) delete code_address_map_;
#ifdef OBJECT_PRINT
- for (int space = 0; space < LAST_SPACE; ++space) {
+ for (int space = 0; space < kNumberOfSpaces; ++space) {
if (instance_type_count_[space] != nullptr) {
DeleteArray(instance_type_count_[space]);
DeleteArray(instance_type_size_[space]);
@@ -57,10 +57,11 @@ Serializer::~Serializer() {
}
#ifdef OBJECT_PRINT
-void Serializer::CountInstanceType(Map map, int size, AllocationSpace space) {
+void Serializer::CountInstanceType(Map map, int size, SnapshotSpace space) {
+ const int space_number = static_cast<int>(space);
int instance_type = map.instance_type();
- instance_type_count_[space][instance_type]++;
- instance_type_size_[space][instance_type] += size;
+ instance_type_count_[space_number][instance_type]++;
+ instance_type_size_[space_number][instance_type] += size;
}
#endif // OBJECT_PRINT
@@ -73,7 +74,7 @@ void Serializer::OutputStatistics(const char* name) {
#ifdef OBJECT_PRINT
PrintF(" Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name) \
- for (int space = 0; space < LAST_SPACE; ++space) { \
+ for (int space = 0; space < kNumberOfSpaces; ++space) { \
if (instance_type_count_[space][Name]) { \
PrintF("%10d %10zu %-10s %s\n", instance_type_count_[space][Name], \
instance_type_size_[space][Name], \
@@ -173,8 +174,8 @@ bool Serializer::SerializeBackReference(HeapObject obj) {
}
PutAlignmentPrefix(obj);
- AllocationSpace space = reference.space();
- sink_.Put(kBackref + space, "BackRef");
+ SnapshotSpace space = reference.space();
+ sink_.Put(kBackref + static_cast<int>(space), "BackRef");
PutBackReference(obj, reference);
}
return true;
@@ -221,11 +222,11 @@ void Serializer::PutBackReference(HeapObject object,
SerializerReference reference) {
DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
switch (reference.space()) {
- case MAP_SPACE:
+ case SnapshotSpace::kMap:
sink_.PutInt(reference.map_index(), "BackRefMapIndex");
break;
- case LO_SPACE:
+ case SnapshotSpace::kLargeObject:
sink_.PutInt(reference.large_object_index(), "BackRefLargeObjectIndex");
break;
@@ -255,9 +256,9 @@ int Serializer::PutAlignmentPrefix(HeapObject object) {
return 0;
}
-void Serializer::PutNextChunk(int space) {
+void Serializer::PutNextChunk(SnapshotSpace space) {
sink_.Put(kNextChunk, "NextChunk");
- sink_.Put(space, "NextChunkSpace");
+ sink_.Put(static_cast<int>(space), "NextChunkSpace");
}
void Serializer::PutRepeat(int repeat_count) {
@@ -298,7 +299,7 @@ Code Serializer::CopyCode(Code code) {
reinterpret_cast<Address>(&code_buffer_.front())));
}
-void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
+void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
int size, Map map) {
if (serializer_->code_address_map_) {
const char* code_name =
@@ -307,22 +308,23 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
CodeNameEvent(object_.address(), sink_->Position(), code_name));
}
+ const int space_number = static_cast<int>(space);
SerializerReference back_reference;
- if (space == LO_SPACE) {
- sink_->Put(kNewObject + space, "NewLargeObject");
+ if (space == SnapshotSpace::kLargeObject) {
+ sink_->Put(kNewObject + space_number, "NewLargeObject");
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
CHECK(!object_.IsCode());
back_reference = serializer_->allocator()->AllocateLargeObject(size);
- } else if (space == MAP_SPACE) {
+ } else if (space == SnapshotSpace::kMap) {
DCHECK_EQ(Map::kSize, size);
back_reference = serializer_->allocator()->AllocateMap();
- sink_->Put(kNewObject + space, "NewMap");
+ sink_->Put(kNewObject + space_number, "NewMap");
// This is redundant, but we include it anyways.
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
} else {
int fill = serializer_->PutAlignmentPrefix(object_);
back_reference = serializer_->allocator()->Allocate(space, size + fill);
- sink_->Put(kNewObject + space, "NewObject");
+ sink_->Put(kNewObject + space_number, "NewObject");
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
}
@@ -468,8 +470,9 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
ExternalTwoByteString::cast(string).resource()->data());
}
- AllocationSpace space =
- (allocation_size > kMaxRegularHeapObjectSize) ? LO_SPACE : OLD_SPACE;
+ SnapshotSpace space = (allocation_size > kMaxRegularHeapObjectSize)
+ ? SnapshotSpace::kLargeObject
+ : SnapshotSpace::kOld;
SerializePrologue(space, allocation_size, map);
// Output the rest of the imaginary string.
@@ -534,8 +537,8 @@ void Serializer::ObjectSerializer::Serialize() {
SerializeExternalString();
return;
} else if (!ReadOnlyHeap::Contains(object_)) {
- // Only clear padding for strings outside RO_SPACE. RO_SPACE should have
- // been cleared elsewhere.
+ // Only clear padding for strings outside the read-only heap. Read-only heap
+ // should have been cleared elsewhere.
if (object_.IsSeqOneByteString()) {
// Clear padding bytes at the end. Done here to avoid having to do this
// at allocation sites in generated code.
@@ -568,11 +571,21 @@ void Serializer::ObjectSerializer::Serialize() {
void Serializer::ObjectSerializer::SerializeObject() {
int size = object_.Size();
Map map = object_.map();
- AllocationSpace space =
- MemoryChunk::FromHeapObject(object_)->owner()->identity();
- // Young generation large objects are tenured.
- if (space == NEW_LO_SPACE) {
- space = LO_SPACE;
+ SnapshotSpace space;
+ if (ReadOnlyHeap::Contains(object_)) {
+ space = SnapshotSpace::kReadOnlyHeap;
+ } else {
+ AllocationSpace heap_space =
+ MemoryChunk::FromHeapObject(object_)->owner_identity();
+ // Large code objects are not supported and cannot be expressed by
+ // SnapshotSpace.
+ DCHECK_NE(heap_space, CODE_LO_SPACE);
+ // Young generation large objects are tenured.
+ if (heap_space == NEW_LO_SPACE) {
+ space = SnapshotSpace::kLargeObject;
+ } else {
+ space = static_cast<SnapshotSpace>(heap_space);
+ }
}
SerializePrologue(space, size, map);
@@ -612,7 +625,8 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
bytes_processed_so_far_ = kTaggedSize;
serializer_->PutAlignmentPrefix(object_);
- sink_->Put(kNewObject + back_reference.space(), "deferred object");
+ sink_->Put(kNewObject + static_cast<int>(back_reference.space()),
+ "deferred object");
serializer_->PutBackReference(object_, back_reference);
sink_->PutInt(size >> kTaggedSizeLog2, "deferred object size");
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index b70c7fd45a..fad2ec8a88 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -205,7 +205,7 @@ class Serializer : public SerializerDeserializer {
void PutAttachedReference(SerializerReference reference);
// Emit alignment prefix if necessary, return required padding space in bytes.
int PutAlignmentPrefix(HeapObject object);
- void PutNextChunk(int space);
+ void PutNextChunk(SnapshotSpace space);
void PutRepeat(int repeat_count);
// Returns true if the object was successfully serialized as a root.
@@ -243,7 +243,7 @@ class Serializer : public SerializerDeserializer {
void OutputStatistics(const char* name);
#ifdef OBJECT_PRINT
- void CountInstanceType(Map map, int size, AllocationSpace space);
+ void CountInstanceType(Map map, int size, SnapshotSpace space);
#endif // OBJECT_PRINT
#ifdef DEBUG
@@ -272,8 +272,8 @@ class Serializer : public SerializerDeserializer {
#ifdef OBJECT_PRINT
static const int kInstanceTypes = LAST_TYPE + 1;
- int* instance_type_count_[LAST_SPACE];
- size_t* instance_type_size_[LAST_SPACE];
+ int* instance_type_count_[kNumberOfSpaces];
+ size_t* instance_type_size_[kNumberOfSpaces];
#endif // OBJECT_PRINT
#ifdef DEBUG
@@ -321,7 +321,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void VisitOffHeapTarget(Code host, RelocInfo* target) override;
private:
- void SerializePrologue(AllocationSpace space, int size, Map map);
+ void SerializePrologue(SnapshotSpace space, int size, Map map);
// This function outputs or skips the raw data between the last pointer and
// up to the current position.
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 61396aaa71..f20f2ad33f 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -5,7 +5,10 @@
#ifndef V8_SNAPSHOT_SNAPSHOT_SOURCE_SINK_H_
#define V8_SNAPSHOT_SNAPSHOT_SOURCE_SINK_H_
+#include <utility>
+
#include "src/base/logging.h"
+#include "src/snapshot/serializer-common.h"
#include "src/utils/utils.h"
namespace v8 {
@@ -66,6 +69,11 @@ class SnapshotByteSource final {
int position() { return position_; }
void set_position(int position) { position_ = position; }
+ std::pair<uint32_t, uint32_t> GetChecksum() const {
+ Checksum checksum(Vector<const byte>(data_, length_));
+ return {checksum.a(), checksum.b()};
+ }
+
private:
const byte* data_;
int length_;
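The new SnapshotByteSource::GetChecksum() exposes the payload's checksum as an (a, b) pair. The Checksum class itself is not part of this diff; the sketch below merely assumes a Fletcher/Adler-style running pair for illustration, with hypothetical names:

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Hypothetical two-component checksum returning an (a, b) pair, in the spirit
// of what GetChecksum() hands back; not V8's actual Checksum implementation.
std::pair<uint32_t, uint32_t> FletcherChecksum(const std::vector<uint8_t>& data) {
  uint32_t a = 1, b = 0;
  for (uint8_t byte : data) {
    a = (a + byte) % 65521;  // modulus borrowed from Adler-32 for the demo
    b = (b + a) % 65521;
  }
  return {a, b};
}

int main() {
  std::vector<uint8_t> payload = {'s', 'n', 'a', 'p'};
  auto sum = FletcherChecksum(payload);
  std::cout << std::hex << sum.first << " " << sum.second << "\n";
}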
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index ef933ef83a..4a4da9f755 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -8,7 +8,6 @@
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/startup-serializer.h"
-#include "src/objects/objects-inl.h"
#include "src/utils/utils.h"
namespace v8 {
@@ -99,11 +98,12 @@ class Snapshot : public AllStatic {
uint32_t index);
static uint32_t GetHeaderValue(const v8::StartupData* data, uint32_t offset) {
- return ReadLittleEndianValue<uint32_t>(
+ return base::ReadLittleEndianValue<uint32_t>(
reinterpret_cast<Address>(data->data) + offset);
}
static void SetHeaderValue(char* data, uint32_t offset, uint32_t value) {
- WriteLittleEndianValue(reinterpret_cast<Address>(data) + offset, value);
+ base::WriteLittleEndianValue(reinterpret_cast<Address>(data) + offset,
+ value);
}
static void CheckVersion(const v8::StartupData* data);
diff --git a/deps/v8/src/strings/OWNERS b/deps/v8/src/strings/OWNERS
index 037c916f24..35ab9a4634 100644
--- a/deps/v8/src/strings/OWNERS
+++ b/deps/v8/src/strings/OWNERS
@@ -1,5 +1,7 @@
bmeurer@chromium.org
-jgruber@chromium.org
jkummerow@chromium.org
+leszeks@chromium.org
verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/strings/char-predicates-inl.h b/deps/v8/src/strings/char-predicates-inl.h
index cdd8ddb4ea..3b9b13aba5 100644
--- a/deps/v8/src/strings/char-predicates-inl.h
+++ b/deps/v8/src/strings/char-predicates-inl.h
@@ -51,6 +51,18 @@ inline constexpr bool IsBinaryDigit(uc32 c) {
return c == '0' || c == '1';
}
+inline constexpr bool IsAsciiLower(uc32 c) { return IsInRange(c, 'a', 'z'); }
+
+inline constexpr bool IsAsciiUpper(uc32 c) { return IsInRange(c, 'A', 'Z'); }
+
+inline constexpr uc32 ToAsciiUpper(uc32 c) {
+ return c & ~(IsAsciiLower(c) << 5);
+}
+
+inline constexpr uc32 ToAsciiLower(uc32 c) {
+ return c | (IsAsciiUpper(c) << 5);
+}
+
inline constexpr bool IsRegExpWord(uc16 c) {
return IsInRange(AsciiAlphaToLower(c), 'a', 'z') || IsDecimalDigit(c) ||
(c == '_');
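The new ToAsciiUpper/ToAsciiLower helpers rely on ASCII letter cases differing only in bit 5 (0x20): clearing that bit upper-cases a lowercase letter, setting it lower-cases an uppercase one, and because the bit is only toggled when IsAsciiLower/IsAsciiUpper holds, every other code point passes through unchanged. A standalone copy of the trick with a few checks:

#include <cstdint>
#include <iostream>

using uc32 = uint32_t;  // stand-in for V8's uc32

constexpr bool IsInRange(uc32 c, uc32 lo, uc32 hi) { return c >= lo && c <= hi; }
constexpr bool IsAsciiLower(uc32 c) { return IsInRange(c, 'a', 'z'); }
constexpr bool IsAsciiUpper(uc32 c) { return IsInRange(c, 'A', 'Z'); }

// 'A' is 0x41, 'a' is 0x61: the cases differ only in bit 5. The branch-free
// forms below clear or set that bit only for letters, so digits, punctuation
// and non-ASCII code points are returned unchanged.
constexpr uc32 ToAsciiUpper(uc32 c) { return c & ~(IsAsciiLower(c) << 5); }
constexpr uc32 ToAsciiLower(uc32 c) { return c | (IsAsciiUpper(c) << 5); }

int main() {
  static_assert(ToAsciiUpper('g') == 'G', "lower -> upper");
  static_assert(ToAsciiLower('G') == 'g', "upper -> lower");
  static_assert(ToAsciiUpper('7') == '7', "non-letters unchanged");
  std::cout << static_cast<char>(ToAsciiUpper('q')) << "\n";  // prints Q
}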
diff --git a/deps/v8/src/strings/char-predicates.h b/deps/v8/src/strings/char-predicates.h
index 43b4d091d1..0262048ec7 100644
--- a/deps/v8/src/strings/char-predicates.h
+++ b/deps/v8/src/strings/char-predicates.h
@@ -26,6 +26,12 @@ inline constexpr bool IsBinaryDigit(uc32 c);
inline constexpr bool IsRegExpWord(uc32 c);
inline constexpr bool IsRegExpNewline(uc32 c);
+inline constexpr bool IsAsciiLower(uc32 ch);
+inline constexpr bool IsAsciiUpper(uc32 ch);
+
+inline constexpr uc32 ToAsciiUpper(uc32 ch);
+inline constexpr uc32 ToAsciiLower(uc32 ch);
+
// ES#sec-names-and-keywords
// This includes '_', '$' and '\', and ID_Start according to
// http://www.unicode.org/reports/tr31/, which consists of categories
diff --git a/deps/v8/src/strings/string-builder-inl.h b/deps/v8/src/strings/string-builder-inl.h
index 88d69b37b5..9f78884a60 100644
--- a/deps/v8/src/strings/string-builder-inl.h
+++ b/deps/v8/src/strings/string-builder-inl.h
@@ -147,6 +147,13 @@ class IncrementalStringBuilder {
}
}
+ V8_INLINE void AppendInt(int i) {
+ char buffer[kIntToCStringBufferSize];
+ const char* str =
+ IntToCString(i, Vector<char>(buffer, kIntToCStringBufferSize));
+ AppendCString(str);
+ }
+
V8_INLINE bool CurrentPartCanFit(int length) {
return part_length_ - current_index_ > length;
}
@@ -277,9 +284,13 @@ class IncrementalStringBuilder {
Handle<SeqString>::cast(current_part()), current_index_));
}
+ void AppendStringByCopy(Handle<String> string);
+ bool CanAppendByCopy(Handle<String> string);
+
static const int kInitialPartLength = 32;
static const int kMaxPartLength = 16 * 1024;
static const int kPartLengthGrowthFactor = 2;
+ static const int kIntToCStringBufferSize = 100;
Isolate* isolate_;
String::Encoding encoding_;
diff --git a/deps/v8/src/strings/string-builder.cc b/deps/v8/src/strings/string-builder.cc
index f647aed190..cfb9a55412 100644
--- a/deps/v8/src/strings/string-builder.cc
+++ b/deps/v8/src/strings/string-builder.cc
@@ -284,7 +284,41 @@ MaybeHandle<String> IncrementalStringBuilder::Finish() {
return accumulator();
}
+// Short strings can be copied directly to {current_part_}.
+// Requires the IncrementalStringBuilder to either have two byte encoding or
+// the incoming string to have one byte representation "underneath" (The
+// one byte check requires the string to be flat).
+bool IncrementalStringBuilder::CanAppendByCopy(Handle<String> string) {
+ constexpr int kMaxStringLengthForCopy = 16;
+ const bool representation_ok =
+ encoding_ == String::TWO_BYTE_ENCODING ||
+ (string->IsFlat() && String::IsOneByteRepresentationUnderneath(*string));
+
+ return representation_ok && string->length() <= kMaxStringLengthForCopy &&
+ CurrentPartCanFit(string->length());
+}
+
+void IncrementalStringBuilder::AppendStringByCopy(Handle<String> string) {
+ DCHECK(CanAppendByCopy(string));
+
+ Handle<SeqOneByteString> part =
+ Handle<SeqOneByteString>::cast(current_part());
+ {
+ DisallowHeapAllocation no_gc;
+ String::WriteToFlat(*string, part->GetChars(no_gc) + current_index_, 0,
+ string->length());
+ }
+ current_index_ += string->length();
+ DCHECK(current_index_ <= part_length_);
+ if (current_index_ == part_length_) Extend();
+}
+
void IncrementalStringBuilder::AppendString(Handle<String> string) {
+ if (CanAppendByCopy(string)) {
+ AppendStringByCopy(string);
+ return;
+ }
+
ShrinkCurrentPart();
part_length_ = kInitialPartLength; // Allocate conservatively.
Extend(); // Attach current part and allocate new part.
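AppendString() now short-circuits into AppendStringByCopy() when the incoming string is short (at most 16 characters), fits in the current part, and has a compatible representation; otherwise it falls back to the old path of sealing the current part and accumulating the string as its own part. A toy builder over std::string illustrating the same decision, with hypothetical names and sizes:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hypothetical builder showing the "copy short strings straight into the
// current part" fast path; the real class works on V8 SeqString parts.
class MiniStringBuilder {
 public:
  void Append(const std::string& s) {
    constexpr size_t kMaxLengthForCopy = 16;
    if (s.size() <= kMaxLengthForCopy &&
        part_.size() + s.size() <= kPartCapacity) {
      part_ += s;  // fast path: copy into the open part
      if (part_.size() == kPartCapacity) Seal();
      return;
    }
    // Slow path: seal the open part and keep the long string as its own part.
    Seal();
    parts_.push_back(s);
  }

  std::string Finish() {
    Seal();
    std::string out;
    for (const std::string& part : parts_) out += part;
    return out;
  }

 private:
  static constexpr size_t kPartCapacity = 32;
  void Seal() {
    if (!part_.empty()) parts_.push_back(std::move(part_));
    part_.clear();
  }
  std::string part_;
  std::vector<std::string> parts_;
};

int main() {
  MiniStringBuilder builder;
  builder.Append("short");  // copied into the open part
  builder.Append("a string long enough to skip the fast path entirely");
  builder.Append("tail");
  std::cout << builder.Finish() << "\n";
}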
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index db1891949e..25a8ffc3c1 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -378,8 +378,9 @@ void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
printee.ShortPrint(this);
Add("\n");
if (printee.IsJSObject()) {
- if (printee.IsJSValue()) {
- Add(" value(): %o\n", JSValue::cast(printee).value());
+ if (printee.IsJSPrimitiveWrapper()) {
+ Add(" value(): %o\n",
+ JSPrimitiveWrapper::cast(printee).value());
}
PrintUsingMap(JSObject::cast(printee));
if (printee.IsJSArray()) {
diff --git a/deps/v8/src/tasks/OWNERS b/deps/v8/src/tasks/OWNERS
new file mode 100644
index 0000000000..2c6630da0c
--- /dev/null
+++ b/deps/v8/src/tasks/OWNERS
@@ -0,0 +1,6 @@
+ahaas@chromium.org
+clemensh@chromium.org
+mlippautz@chromium.org
+mstarzinger@chromium.org
+rmcilroy@chromium.org
+ulan@chromium.org
diff --git a/deps/v8/src/third_party/siphash/OWNERS b/deps/v8/src/third_party/siphash/OWNERS
new file mode 100644
index 0000000000..208670527f
--- /dev/null
+++ b/deps/v8/src/third_party/siphash/OWNERS
@@ -0,0 +1,3 @@
+sigurds@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/third_party/utf8-decoder/OWNERS b/deps/v8/src/third_party/utf8-decoder/OWNERS
new file mode 100644
index 0000000000..c008e4cbce
--- /dev/null
+++ b/deps/v8/src/third_party/utf8-decoder/OWNERS
@@ -0,0 +1,2 @@
+mathias@chromium.org
+marja@chromium.org
diff --git a/deps/v8/src/third_party/valgrind/OWNERS b/deps/v8/src/third_party/valgrind/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/third_party/valgrind/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/third_party/vtune/OWNERS b/deps/v8/src/third_party/vtune/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index f26e9b2326..23de121065 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -6,13 +6,16 @@
#define V8_TORQUE_AST_H_
#include <iostream>
+#include <map>
#include <memory>
+#include <set>
#include <string>
#include <vector>
#include "src/base/optional.h"
#include "src/torque/constants.h"
#include "src/torque/source-positions.h"
+#include "src/torque/utils.h"
namespace v8 {
namespace internal {
@@ -52,7 +55,6 @@ namespace torque {
V(IfStatement) \
V(WhileStatement) \
V(ForLoopStatement) \
- V(ForOfLoopStatement) \
V(BreakStatement) \
V(ContinueStatement) \
V(ReturnStatement) \
@@ -143,6 +145,12 @@ struct AstNodeClassCheck {
struct Expression : AstNode {
Expression(Kind kind, SourcePosition pos) : AstNode(kind, pos) {}
DEFINE_AST_NODE_INNER_BOILERPLATE(Expression)
+
+ using VisitCallback = std::function<void(Expression*)>;
+ virtual void VisitAllSubExpressions(VisitCallback callback) {
+ // TODO(szuend): Hoist this up to AstNode and make it a
+ // general Ast visitor.
+ }
};
struct LocationExpression : Expression {
@@ -193,9 +201,14 @@ class Ast {
return result;
}
+ void DeclareImportForCurrentFile(SourceId import_id) {
+ declared_imports_[CurrentSourcePosition::Get().source].insert(import_id);
+ }
+
private:
std::vector<Declaration*> declarations_;
std::vector<std::unique_ptr<AstNode>> nodes_;
+ std::map<SourceId, std::set<SourceId>> declared_imports_;
};
static const char* const kThisParameterName = "this";
@@ -227,6 +240,11 @@ struct IdentifierExpression : LocationExpression {
std::vector<TypeExpression*> args = {})
: IdentifierExpression(pos, {}, name, std::move(args)) {}
bool IsThis() const { return name->value == kThisParameterName; }
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ callback(this);
+ }
+
std::vector<std::string> namespace_qualification;
Identifier* name;
std::vector<TypeExpression*> generic_arguments;
@@ -241,6 +259,14 @@ struct IntrinsicCallExpression : Expression {
name(std::move(name)),
generic_arguments(std::move(generic_arguments)),
arguments(std::move(arguments)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ for (auto argument : arguments) {
+ argument->VisitAllSubExpressions(callback);
+ }
+ callback(this);
+ }
+
std::string name;
std::vector<TypeExpression*> generic_arguments;
std::vector<Expression*> arguments;
@@ -257,6 +283,16 @@ struct CallMethodExpression : Expression {
method(method),
arguments(std::move(arguments)),
labels(std::move(labels)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ target->VisitAllSubExpressions(callback);
+ method->VisitAllSubExpressions(callback);
+ for (auto argument : arguments) {
+ argument->VisitAllSubExpressions(callback);
+ }
+ callback(this);
+ }
+
Expression* target;
IdentifierExpression* method;
std::vector<Expression*> arguments;
@@ -272,6 +308,15 @@ struct CallExpression : Expression {
callee(callee),
arguments(std::move(arguments)),
labels(std::move(labels)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ callee->VisitAllSubExpressions(callback);
+ for (auto argument : arguments) {
+ argument->VisitAllSubExpressions(callback);
+ }
+ callback(this);
+ }
+
IdentifierExpression* callee;
std::vector<Expression*> arguments;
std::vector<Identifier*> labels;
@@ -289,6 +334,14 @@ struct StructExpression : Expression {
: Expression(kKind, pos),
type(type),
initializers(std::move(initializers)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ for (auto& initializer : initializers) {
+ initializer.expression->VisitAllSubExpressions(callback);
+ }
+ callback(this);
+ }
+
TypeExpression* type;
std::vector<NameAndExpression> initializers;
};
@@ -297,6 +350,13 @@ struct LogicalOrExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LogicalOrExpression)
LogicalOrExpression(SourcePosition pos, Expression* left, Expression* right)
: Expression(kKind, pos), left(left), right(right) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ left->VisitAllSubExpressions(callback);
+ right->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* left;
Expression* right;
};
@@ -305,6 +365,13 @@ struct LogicalAndExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LogicalAndExpression)
LogicalAndExpression(SourcePosition pos, Expression* left, Expression* right)
: Expression(kKind, pos), left(left), right(right) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ left->VisitAllSubExpressions(callback);
+ right->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* left;
Expression* right;
};
@@ -313,6 +380,12 @@ struct SpreadExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(SpreadExpression)
SpreadExpression(SourcePosition pos, Expression* spreadee)
: Expression(kKind, pos), spreadee(spreadee) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ spreadee->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* spreadee;
};
@@ -324,6 +397,14 @@ struct ConditionalExpression : Expression {
condition(condition),
if_true(if_true),
if_false(if_false) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ condition->VisitAllSubExpressions(callback);
+ if_true->VisitAllSubExpressions(callback);
+ if_false->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* condition;
Expression* if_true;
Expression* if_false;
@@ -333,6 +414,11 @@ struct StringLiteralExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StringLiteralExpression)
StringLiteralExpression(SourcePosition pos, std::string literal)
: Expression(kKind, pos), literal(std::move(literal)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ callback(this);
+ }
+
std::string literal;
};
@@ -340,6 +426,11 @@ struct NumberLiteralExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(NumberLiteralExpression)
NumberLiteralExpression(SourcePosition pos, std::string name)
: Expression(kKind, pos), number(std::move(name)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ callback(this);
+ }
+
std::string number;
};
@@ -348,6 +439,13 @@ struct ElementAccessExpression : LocationExpression {
ElementAccessExpression(SourcePosition pos, Expression* array,
Expression* index)
: LocationExpression(kKind, pos), array(array), index(index) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ array->VisitAllSubExpressions(callback);
+ index->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* array;
Expression* index;
};
@@ -357,6 +455,12 @@ struct FieldAccessExpression : LocationExpression {
FieldAccessExpression(SourcePosition pos, Expression* object,
Identifier* field)
: LocationExpression(kKind, pos), object(object), field(field) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ object->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* object;
Identifier* field;
};
@@ -365,6 +469,12 @@ struct DereferenceExpression : LocationExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(DereferenceExpression)
DereferenceExpression(SourcePosition pos, Expression* reference)
: LocationExpression(kKind, pos), reference(reference) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ reference->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* reference;
};
@@ -379,6 +489,13 @@ struct AssignmentExpression : Expression {
location(location),
op(std::move(op)),
value(value) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ location->VisitAllSubExpressions(callback);
+ value->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* location;
base::Optional<std::string> op;
Expression* value;
@@ -391,6 +508,12 @@ struct IncrementDecrementExpression : Expression {
IncrementDecrementExpression(SourcePosition pos, Expression* location,
IncrementDecrementOperator op, bool postfix)
: Expression(kKind, pos), location(location), op(op), postfix(postfix) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ location->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* location;
IncrementDecrementOperator op;
bool postfix;
@@ -408,6 +531,12 @@ struct AssumeTypeImpossibleExpression : Expression {
: Expression(kKind, pos),
excluded_type(excluded_type),
expression(expression) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ expression->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
TypeExpression* excluded_type;
Expression* expression;
};
@@ -419,18 +548,30 @@ struct NewExpression : Expression {
: Expression(kKind, pos),
type(type),
initializers(std::move(initializers)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ for (auto& initializer : initializers) {
+ initializer.expression->VisitAllSubExpressions(callback);
+ }
+ callback(this);
+ }
+
TypeExpression* type;
std::vector<NameAndExpression> initializers;
};
+enum class ImplicitKind { kNoImplicit, kJSImplicit, kImplicit };
+
struct ParameterList {
std::vector<Identifier*> names;
std::vector<TypeExpression*> types;
- size_t implicit_count;
- bool has_varargs;
- std::string arguments_variable;
+ ImplicitKind implicit_kind = ImplicitKind::kNoImplicit;
+ SourcePosition implicit_kind_pos = SourcePosition::Invalid();
+ size_t implicit_count = 0;
+ bool has_varargs = false;
+ std::string arguments_variable = "";
- static ParameterList Empty() { return ParameterList{{}, {}, 0, false, ""}; }
+ static ParameterList Empty() { return {}; }
std::vector<TypeExpression*> GetImplicitTypes() {
return std::vector<TypeExpression*>(types.begin(),
types.begin() + implicit_count);
@@ -445,14 +586,17 @@ struct BasicTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(BasicTypeExpression)
BasicTypeExpression(SourcePosition pos,
std::vector<std::string> namespace_qualification,
- std::string name)
+ std::string name,
+ std::vector<TypeExpression*> generic_arguments)
: TypeExpression(kKind, pos),
namespace_qualification(std::move(namespace_qualification)),
is_constexpr(IsConstexprName(name)),
- name(std::move(name)) {}
+ name(std::move(name)),
+ generic_arguments(std::move(generic_arguments)) {}
std::vector<std::string> namespace_qualification;
bool is_constexpr;
std::string name;
+ std::vector<TypeExpression*> generic_arguments;
};
struct FunctionTypeExpression : TypeExpression {
@@ -605,31 +749,6 @@ struct ForLoopStatement : Statement {
Statement* body;
};
-struct RangeExpression {
- base::Optional<Expression*> begin;
- base::Optional<Expression*> end;
-};
-
-struct ForOfLoopStatement : Statement {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(ForOfLoopStatement)
- ForOfLoopStatement(SourcePosition pos, Statement* decl, Expression* iterable,
- base::Optional<RangeExpression> range, Statement* body)
- : Statement(kKind, pos),
- var_declaration(VarDeclarationStatement::cast(decl)),
- iterable(iterable),
- body(body) {
- if (range) {
- begin = range->begin;
- end = range->end;
- }
- }
- VarDeclarationStatement* var_declaration;
- Expression* iterable;
- base::Optional<Expression*> begin;
- base::Optional<Expression*> end;
- Statement* body;
-};
-
struct LabelBlock : AstNode {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LabelBlock)
LabelBlock(SourcePosition pos, Identifier* label,
@@ -710,6 +829,11 @@ struct NameAndTypeExpression {
TypeExpression* type;
};
+struct ImplicitParameters {
+ Identifier* kind;
+ std::vector<NameAndTypeExpression> parameters;
+};
+
struct StructFieldExpression {
NameAndTypeExpression name_and_type;
bool const_qualified;
@@ -769,7 +893,12 @@ struct MacroDeclaration : CallableNode {
const LabelAndTypesVector& labels)
: CallableNode(kind, pos, transitioning, std::move(name),
std::move(parameters), return_type, labels),
- op(std::move(op)) {}
+ op(std::move(op)) {
+ if (parameters.implicit_kind == ImplicitKind::kJSImplicit) {
+ Error("Cannot use \"js-implicit\" with macros, use \"implicit\" instead.")
+ .Position(parameters.implicit_kind_pos);
+ }
+ }
base::Optional<std::string> op;
};
@@ -793,7 +922,11 @@ struct IntrinsicDeclaration : CallableNode {
IntrinsicDeclaration(SourcePosition pos, std::string name,
ParameterList parameters, TypeExpression* return_type)
: CallableNode(kKind, pos, false, std::move(name), std::move(parameters),
- return_type, {}) {}
+ return_type, {}) {
+ if (parameters.implicit_kind != ImplicitKind::kNoImplicit) {
+ Error("Intinsics cannot have implicit parameters.");
+ }
+ }
};
struct TorqueMacroDeclaration : MacroDeclaration {
@@ -817,7 +950,21 @@ struct BuiltinDeclaration : CallableNode {
TypeExpression* return_type)
: CallableNode(kind, pos, transitioning, std::move(name),
std::move(parameters), return_type, {}),
- javascript_linkage(javascript_linkage) {}
+ javascript_linkage(javascript_linkage) {
+ if (parameters.implicit_kind == ImplicitKind::kJSImplicit &&
+ !javascript_linkage) {
+ Error(
+ "\"js-implicit\" is for implicit parameters passed according to the "
+ "JavaScript calling convention. Use \"implicit\" instead.");
+ }
+ if (parameters.implicit_kind == ImplicitKind::kImplicit &&
+ javascript_linkage) {
+ Error(
+ "The JavaScript calling convention implicitly passes a fixed set of "
+ "values. Use \"js-implicit\" to refer to those.")
+ .Position(parameters.implicit_kind_pos);
+ }
+ }
bool javascript_linkage;
};
@@ -926,12 +1073,17 @@ struct StructDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StructDeclaration)
StructDeclaration(SourcePosition pos, Identifier* name,
std::vector<Declaration*> methods,
- std::vector<StructFieldExpression> fields)
+ std::vector<StructFieldExpression> fields,
+ std::vector<Identifier*> generic_parameters)
: TypeDeclaration(kKind, pos, name),
methods(std::move(methods)),
- fields(std::move(fields)) {}
+ fields(std::move(fields)),
+ generic_parameters(std::move(generic_parameters)) {}
std::vector<Declaration*> methods;
std::vector<StructFieldExpression> fields;
+ std::vector<Identifier*> generic_parameters;
+
+ bool IsGeneric() const { return !generic_parameters.empty(); }
};
struct ClassDeclaration : TypeDeclaration {
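All of the VisitAllSubExpressions overrides added above follow one shape: recurse into child expressions first, then invoke the callback on the node itself, which yields a post-order traversal of the expression tree. A stripped-down sketch of the pattern on a hypothetical two-node AST (not the Torque node types):

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

// Hypothetical mini-AST showing the post-order callback traversal used by
// VisitAllSubExpressions: children first, then the node itself.
struct Expr {
  using VisitCallback = std::function<void(Expr*)>;
  virtual ~Expr() = default;
  virtual void VisitAllSubExpressions(VisitCallback callback) { callback(this); }
  virtual const char* Name() const = 0;
};

struct Literal : Expr {
  const char* Name() const override { return "Literal"; }
};

struct Call : Expr {
  std::vector<std::unique_ptr<Expr>> arguments;
  const char* Name() const override { return "Call"; }
  void VisitAllSubExpressions(VisitCallback callback) override {
    for (auto& argument : arguments) argument->VisitAllSubExpressions(callback);
    callback(this);  // visit the call itself last
  }
};

int main() {
  Call call;
  call.arguments.push_back(std::make_unique<Literal>());
  call.arguments.push_back(std::make_unique<Literal>());
  call.VisitAllSubExpressions(
      [](Expr* e) { std::cout << e->Name() << "\n"; });  // Literal, Literal, Call
}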
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 650b134140..4ad3a6ec3c 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -18,16 +18,20 @@ static const char* const CONSTEXPR_TYPE_PREFIX = "constexpr ";
static const char* const NEVER_TYPE_STRING = "never";
static const char* const CONSTEXPR_BOOL_TYPE_STRING = "constexpr bool";
static const char* const CONSTEXPR_INTPTR_TYPE_STRING = "constexpr intptr";
+static const char* const CONSTEXPR_INSTANCE_TYPE_TYPE_STRING =
+ "constexpr InstanceType";
static const char* const BOOL_TYPE_STRING = "bool";
static const char* const VOID_TYPE_STRING = "void";
static const char* const ARGUMENTS_TYPE_STRING = "Arguments";
static const char* const CONTEXT_TYPE_STRING = "Context";
+static const char* const JS_FUNCTION_TYPE_STRING = "JSFunction";
static const char* const MAP_TYPE_STRING = "Map";
static const char* const OBJECT_TYPE_STRING = "Object";
static const char* const HEAP_OBJECT_TYPE_STRING = "HeapObject";
static const char* const JSOBJECT_TYPE_STRING = "JSObject";
static const char* const SMI_TYPE_STRING = "Smi";
static const char* const TAGGED_TYPE_STRING = "Tagged";
+static const char* const UNINITIALIZED_TYPE_STRING = "Uninitialized";
static const char* const RAWPTR_TYPE_STRING = "RawPtr";
static const char* const CONST_STRING_TYPE_STRING = "constexpr string";
static const char* const STRING_TYPE_STRING = "String";
diff --git a/deps/v8/src/torque/contextual.h b/deps/v8/src/torque/contextual.h
index 628d5b8514..92d2bdf3d7 100644
--- a/deps/v8/src/torque/contextual.h
+++ b/deps/v8/src/torque/contextual.h
@@ -14,6 +14,9 @@ namespace v8 {
namespace internal {
namespace torque {
+template <class Variable>
+V8_EXPORT_PRIVATE typename Variable::VariableType*& ContextualVariableTop();
+
// {ContextualVariable} provides a clean alternative to a global variable.
// The contextual variable is mutable, and supports managing the value of
// a variable in a well-nested fashion via the {Scope} class.
@@ -66,7 +69,9 @@ class ContextualVariable {
}
private:
- V8_EXPORT_PRIVATE static VarType*& Top();
+ template <class T>
+ friend typename T::VariableType*& ContextualVariableTop();
+ static VarType*& Top() { return ContextualVariableTop<Derived>(); }
static bool HasScope() { return Top() != nullptr; }
friend class MessageBuilder;
@@ -77,12 +82,11 @@ class ContextualVariable {
struct VarName \
: v8::internal::torque::ContextualVariable<VarName, __VA_ARGS__> {}
-#define DEFINE_CONTEXTUAL_VARIABLE(VarName) \
- template <> \
- V8_EXPORT_PRIVATE VarName::VariableType*& \
- ContextualVariable<VarName, VarName::VariableType>::Top() { \
- static thread_local VarName::VariableType* top = nullptr; \
- return top; \
+#define DEFINE_CONTEXTUAL_VARIABLE(VarName) \
+ template <> \
+ V8_EXPORT_PRIVATE VarName::VariableType*& ContextualVariableTop<VarName>() { \
+ static thread_local VarName::VariableType* top = nullptr; \
+ return top; \
}
// By inheriting from {ContextualClass} a class can become a contextual variable
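DEFINE_CONTEXTUAL_VARIABLE now puts the thread_local slot in an explicitly specialized free function, ContextualVariableTop&lt;VarName&gt;(), which the class-level Top() forwards to; presumably this makes the per-variable specialization easier to export than a specialized member function. The sketch below shows the underlying idea of a contextual variable, a thread_local top pointer managed by a well-nested RAII Scope; for brevity it keeps the thread_local inside the class template and uses hypothetical names:

#include <cassert>
#include <iostream>
#include <utility>

// Hypothetical sketch of a contextual variable in the spirit of
// src/torque/contextual.h: a per-thread "top" pointer plus an RAII Scope.
template <class Derived, class VarType>
class ContextualVariable {
 public:
  class Scope {
   public:
    explicit Scope(VarType value) : value_(std::move(value)), previous_(Top()) {
      Top() = &value_;               // push this scope's value
    }
    ~Scope() { Top() = previous_; }  // restore the enclosing value
   private:
    VarType value_;
    VarType* previous_;
  };

  static VarType& Get() {
    assert(Top() != nullptr && "no active Scope");
    return *Top();
  }

 private:
  static VarType*& Top() {
    static thread_local VarType* top = nullptr;  // per-thread slot
    return top;
  }
};

struct CurrentIndent : ContextualVariable<CurrentIndent, int> {};

int main() {
  CurrentIndent::Scope outer(2);
  std::cout << CurrentIndent::Get() << "\n";    // 2
  {
    CurrentIndent::Scope inner(4);
    std::cout << CurrentIndent::Get() << "\n";  // 4
  }
  std::cout << CurrentIndent::Get() << "\n";    // back to 2
}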
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index a29f832e7d..6a798a2707 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -56,14 +56,10 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
}
void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
- std::string file = SourceFileMap::GetSource(pos.source);
+ const std::string& file = SourceFileMap::AbsolutePath(pos.source);
if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
// Lines in Torque SourcePositions are zero-based, while the
// CodeStubAssembler and downwind systems are one-based.
- for (auto& c : file) {
- if (c == '\\')
- c = '/';
- }
out_ << " ca_.SetSourcePosition(\"" << file << "\", "
<< (pos.start.line + 1) << ");\n";
previous_position_ = pos;
@@ -260,9 +256,8 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
} else if (instruction.intrinsic->ExternalName() == "%Allocate") {
out_ << "ca_.UncheckedCast<" << return_type->GetGeneratedTNodeTypeName()
<< ">(CodeStubAssembler(state_).Allocate";
- } else if (instruction.intrinsic->ExternalName() ==
- "%AllocateInternalClass") {
- out_ << "CodeStubAssembler(state_).AllocateUninitializedFixedArray";
+ } else if (instruction.intrinsic->ExternalName() == "%GetStructMap") {
+ out_ << "CodeStubAssembler(state_).GetStructMap";
} else {
ReportError("no built in intrinsic with name " +
instruction.intrinsic->ExternalName());
@@ -318,8 +313,7 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
out_ << ") = ";
} else {
if (results.size() == 1) {
- out_ << results[0] << " = ca_.UncheckedCast<"
- << return_type->GetGeneratedTNodeTypeName() << ">(";
+ out_ << results[0] << " = ";
} else {
DCHECK_EQ(0, results.size());
}
@@ -334,7 +328,6 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
if (needs_flattening) {
out_ << ").Flatten();\n";
} else {
- if (results.size() == 1) out_ << ")";
out_ << ");\n";
}
PostCallableExceptionPreparation(catch_name, return_type,
@@ -528,9 +521,9 @@ std::string CSAGenerator::PreCallableExceptionPreparation(
if (catch_block) {
catch_name = FreshCatchName();
out_ << " compiler::CodeAssemblerExceptionHandlerLabel " << catch_name
- << "_label(&ca_, compiler::CodeAssemblerLabel::kDeferred);\n";
+ << "__label(&ca_, compiler::CodeAssemblerLabel::kDeferred);\n";
out_ << " { compiler::CodeAssemblerScopedExceptionHandler s(&ca_, &"
- << catch_name << "_label);\n";
+ << catch_name << "__label);\n";
}
return catch_name;
}
@@ -541,7 +534,7 @@ void CSAGenerator::PostCallableExceptionPreparation(
if (catch_block) {
std::string block_name = BlockName(*catch_block);
out_ << " }\n";
- out_ << " if (" << catch_name << "_label.is_used()) {\n";
+ out_ << " if (" << catch_name << "__label.is_used()) {\n";
out_ << " compiler::CodeAssemblerLabel " << catch_name
<< "_skip(&ca_);\n";
if (!return_type->IsNever()) {
@@ -549,7 +542,7 @@ void CSAGenerator::PostCallableExceptionPreparation(
}
out_ << " compiler::TNode<Object> " << catch_name
<< "_exception_object;\n";
- out_ << " ca_.Bind(&" << catch_name << "_label, &" << catch_name
+ out_ << " ca_.Bind(&" << catch_name << "__label, &" << catch_name
<< "_exception_object);\n";
out_ << " ca_.Goto(&" << block_name;
for (size_t i = 0; i < stack->Size(); ++i) {
@@ -695,8 +688,8 @@ void CSAGenerator::EmitInstruction(const AbortInstruction& instruction,
out_ << " CodeStubAssembler(state_).DebugBreak();\n";
break;
case AbortInstruction::Kind::kAssertionFailure: {
- std::string file =
- StringLiteralQuote(SourceFileMap::GetSource(instruction.pos.source));
+ std::string file = StringLiteralQuote(
+ SourceFileMap::PathFromV8Root(instruction.pos.source));
out_ << " CodeStubAssembler(state_).FailAssert("
<< StringLiteralQuote(instruction.message) << ", " << file << ", "
<< instruction.pos.start.line + 1 << ");\n";
@@ -723,12 +716,8 @@ void CSAGenerator::EmitInstruction(
out_ << " compiler::TNode<IntPtrT> " << offset_name
<< " = ca_.IntPtrConstant(";
- if (instruction.class_type->IsExtern()) {
out_ << field.aggregate->GetGeneratedTNodeTypeName() << "::k"
<< CamelifyString(field.name_and_type.name) << "Offset";
- } else {
- out_ << "FixedArray::kHeaderSize + " << field.offset;
- }
out_ << ");\n"
<< " USE(" << stack->Top() << ");\n";
}
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index afa6d50d94..cf6fd2554b 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -50,6 +50,7 @@ class Declarable {
kRuntimeFunction,
kIntrinsic,
kGeneric,
+ kGenericStructType,
kTypeAlias,
kExternConstant,
kNamespaceConstant
@@ -64,6 +65,7 @@ class Declarable {
bool IsBuiltin() const { return kind() == kBuiltin; }
bool IsRuntimeFunction() const { return kind() == kRuntimeFunction; }
bool IsGeneric() const { return kind() == kGeneric; }
+ bool IsGenericStructType() const { return kind() == kGenericStructType; }
bool IsTypeAlias() const { return kind() == kTypeAlias; }
bool IsExternConstant() const { return kind() == kExternConstant; }
bool IsNamespaceConstant() const { return kind() == kNamespaceConstant; }
@@ -183,15 +185,9 @@ class Namespace : public Scope {
const std::string& name() const { return name_; }
bool IsDefaultNamespace() const;
bool IsTestNamespace() const;
- std::ostream& source_stream() { return source_stream_; }
- std::ostream& header_stream() { return header_stream_; }
- std::string source() { return source_stream_.str(); }
- std::string header() { return header_stream_.str(); }
private:
std::string name_;
- std::stringstream header_stream_;
- std::stringstream source_stream_;
};
inline Namespace* CurrentNamespace() {
@@ -318,16 +314,23 @@ class Macro : public Callable {
return Callable::ShouldBeInlined();
}
+ void SetUsed() { used_ = true; }
+ bool IsUsed() const { return used_; }
+
protected:
Macro(Declarable::Kind kind, std::string external_name,
std::string readable_name, const Signature& signature,
bool transitioning, base::Optional<Statement*> body)
: Callable(kind, std::move(external_name), std::move(readable_name),
- signature, transitioning, body) {
+ signature, transitioning, body),
+ used_(false) {
if (signature.parameter_types.var_args) {
ReportError("Varargs are not supported for macros.");
}
}
+
+ private:
+ bool used_;
};
class ExternMacro : public Macro {
@@ -449,26 +452,43 @@ class Intrinsic : public Callable {
}
};
-class Generic : public Declarable {
+template <class T>
+class SpecializationMap {
+ private:
+ using Map = std::unordered_map<TypeVector, T*, base::hash<TypeVector>>;
+
public:
- DECLARE_DECLARABLE_BOILERPLATE(Generic, generic)
+ SpecializationMap() {}
- GenericDeclaration* declaration() const { return declaration_; }
- const std::vector<Identifier*> generic_parameters() const {
- return declaration()->generic_parameters;
- }
- const std::string& name() const { return name_; }
- void AddSpecialization(const TypeVector& type_arguments,
- Callable* specialization) {
+ void Add(const TypeVector& type_arguments, T* specialization) {
DCHECK_EQ(0, specializations_.count(type_arguments));
specializations_[type_arguments] = specialization;
}
- base::Optional<Callable*> GetSpecialization(
- const TypeVector& type_arguments) const {
+ base::Optional<T*> Get(const TypeVector& type_arguments) const {
auto it = specializations_.find(type_arguments);
if (it != specializations_.end()) return it->second;
return base::nullopt;
}
+
+ using iterator = typename Map::const_iterator;
+ iterator begin() const { return specializations_.begin(); }
+ iterator end() const { return specializations_.end(); }
+
+ private:
+ Map specializations_;
+};
+
+class Generic : public Declarable {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(Generic, generic)
+
+ const std::string& name() const { return name_; }
+ GenericDeclaration* declaration() const { return declaration_; }
+ const std::vector<Identifier*> generic_parameters() const {
+ return declaration()->generic_parameters;
+ }
+ SpecializationMap<Callable>& specializations() { return specializations_; }
+
base::Optional<TypeVector> InferSpecializationTypes(
const TypeVector& explicit_specialization_types,
const TypeVector& arguments);
@@ -481,9 +501,8 @@ class Generic : public Declarable {
declaration_(declaration) {}
std::string name_;
- std::unordered_map<TypeVector, Callable*, base::hash<TypeVector>>
- specializations_;
GenericDeclaration* declaration_;
+ SpecializationMap<Callable> specializations_;
};
struct SpecializationKey {
@@ -491,6 +510,32 @@ struct SpecializationKey {
TypeVector specialized_types;
};
+class GenericStructType : public Declarable {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(GenericStructType, generic_type)
+ const std::string& name() const { return name_; }
+ StructDeclaration* declaration() const { return declaration_; }
+ const std::vector<Identifier*>& generic_parameters() const {
+ return declaration_->generic_parameters;
+ }
+ SpecializationMap<const StructType>& specializations() {
+ return specializations_;
+ }
+
+ private:
+ friend class Declarations;
+ GenericStructType(const std::string& name, StructDeclaration* declaration)
+ : Declarable(Declarable::kGenericStructType),
+ name_(name),
+ declaration_(declaration) {
+ DCHECK_GT(declaration->generic_parameters.size(), 0);
+ }
+
+ std::string name_;
+ StructDeclaration* declaration_;
+ SpecializationMap<const StructType> specializations_;
+};
+
class TypeAlias : public Declarable {
public:
DECLARE_DECLARABLE_BOILERPLATE(TypeAlias, type_alias)
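SpecializationMap&lt;T&gt; factors the specialization cache out of Generic so the new GenericStructType can reuse it: a lookup table keyed by the vector of type arguments, filled once per instantiation. A toy version keyed by type-name strings (the V8 version hashes a TypeVector and stores Callable* or StructType* instead):

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Hypothetical specialization cache keyed by the argument-type list. std::map
// with operator< on vector<string> keeps this sketch dependency-free.
class SpecializationMap {
 public:
  void Add(const std::vector<std::string>& type_arguments,
           std::string specialization) {
    specializations_[type_arguments] = std::move(specialization);
  }
  const std::string* Get(const std::vector<std::string>& type_arguments) const {
    auto it = specializations_.find(type_arguments);
    return it == specializations_.end() ? nullptr : &it->second;
  }

 private:
  std::map<std::vector<std::string>, std::string> specializations_;
};

int main() {
  SpecializationMap generic_struct;  // e.g. specializations of a generic Box<T>
  generic_struct.Add({"Smi"}, "Box_Smi");
  if (const std::string* found = generic_struct.Get({"Smi"})) {
    std::cout << *found << "\n";  // reuse the existing specialization
  }
  std::cout << (generic_struct.Get({"String"}) ? "hit" : "miss") << "\n";
}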
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index 34914d7b72..e0e996f33b 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -76,28 +76,12 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
Builtin::Kind kind = !javascript ? Builtin::kStub
: varargs ? Builtin::kVarArgsJavaScript
: Builtin::kFixedArgsJavaScript;
- const Type* context_type =
- Declarations::LookupGlobalType(CONTEXT_TYPE_STRING);
- if (signature.types().size() == 0 ||
- !(signature.types()[0] == context_type)) {
- Error("First parameter to builtin ", decl->name, " must be of type ",
- *context_type);
- }
if (varargs && !javascript) {
Error("Rest parameters require ", decl->name,
" to be a JavaScript builtin");
}
- if (javascript) {
- if (signature.types().size() >= 2 &&
- !(signature.types()[1] ==
- Declarations::LookupGlobalType(OBJECT_TYPE_STRING))) {
- Error("Second parameter to javascript builtin ", decl->name, " is ",
- *signature.types()[1], " but should be Object");
- }
- }
-
for (size_t i = 0; i < signature.types().size(); ++i) {
if (const StructType* type =
StructType::DynamicCast(signature.types()[i])) {
@@ -136,8 +120,7 @@ void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl,
const Signature& signature,
base::Optional<Statement*> body) {
if (signature.parameter_types.types.size() == 0 ||
- !(signature.parameter_types.types[0] ==
- Declarations::LookupGlobalType(CONTEXT_TYPE_STRING))) {
+ !(signature.parameter_types.types[0] == TypeOracle::GetContextType())) {
ReportError(
"first parameter to runtime functions has to be the context and have "
"type Context, but found type ",
@@ -350,7 +333,7 @@ Callable* DeclarationVisitor::Specialize(
<< std::to_string(generic_parameter_count) << ")";
ReportError(stream.str());
}
- if (key.generic->GetSpecialization(key.specialized_types)) {
+ if (key.generic->specializations().Get(key.specialized_types)) {
ReportError("cannot redeclare specialization of ", key.generic->name(),
" with types <", key.specialized_types, ">");
}
@@ -381,7 +364,7 @@ Callable* DeclarationVisitor::Specialize(
callable = CreateBuiltin(builtin, generated_name, readable_name.str(),
type_signature, *body);
}
- key.generic->AddSpecialization(key.specialized_types, callable);
+ key.generic->specializations().Add(key.specialized_types, callable);
return callable;
}
diff --git a/deps/v8/src/torque/declaration-visitor.h b/deps/v8/src/torque/declaration-visitor.h
index 4c6053d86a..dbd28f4b87 100644
--- a/deps/v8/src/torque/declaration-visitor.h
+++ b/deps/v8/src/torque/declaration-visitor.h
@@ -37,6 +37,13 @@ class PredeclarationVisitor {
static void Predeclare(TypeDeclaration* decl) {
Declarations::PredeclareTypeAlias(decl->name, decl, false);
}
+ static void Predeclare(StructDeclaration* decl) {
+ if (decl->IsGeneric()) {
+ Declarations::DeclareGenericStructType(decl->name->value, decl);
+ } else {
+ Declarations::PredeclareTypeAlias(decl->name, decl, false);
+ }
+ }
static void Predeclare(GenericDeclaration* decl) {
Declarations::DeclareGeneric(decl->callable->name, decl);
}
@@ -59,6 +66,11 @@ class DeclarationVisitor {
// are reported even if the type is unused.
Declarations::LookupType(decl->name);
}
+ static void Visit(StructDeclaration* decl) {
+ if (!decl->IsGeneric()) {
+ Declarations::LookupType(decl->name);
+ }
+ }
static Builtin* CreateBuiltin(BuiltinDeclaration* decl,
std::string external_name,
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index f3f3e84cad..73d46d6998 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -133,6 +133,12 @@ Generic* Declarations::LookupUniqueGeneric(const QualifiedName& name) {
"generic");
}
+GenericStructType* Declarations::LookupUniqueGenericStructType(
+ const QualifiedName& name) {
+ return EnsureUnique(FilterDeclarables<GenericStructType>(Lookup(name)), name,
+ "generic struct");
+}
+
Namespace* Declarations::DeclareNamespace(const std::string& name) {
return Declare(name, std::unique_ptr<Namespace>(new Namespace(name)));
}
@@ -278,6 +284,12 @@ Generic* Declarations::DeclareGeneric(const std::string& name,
return Declare(name, std::unique_ptr<Generic>(new Generic(name, generic)));
}
+GenericStructType* Declarations::DeclareGenericStructType(
+ const std::string& name, StructDeclaration* decl) {
+ return Declare(name, std::unique_ptr<GenericStructType>(
+ new GenericStructType(name, decl)));
+}
+
std::string Declarations::GetGeneratedCallableName(
const std::string& name, const TypeVector& specialized_types) {
std::string result = name;
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index 0dd9be9974..00e0facefe 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -15,8 +15,13 @@ namespace internal {
namespace torque {
static constexpr const char* const kFromConstexprMacroName = "FromConstexpr";
-static constexpr const char* kTrueLabelName = "_True";
-static constexpr const char* kFalseLabelName = "_False";
+static constexpr const char* kTrueLabelName = "__True";
+static constexpr const char* kFalseLabelName = "__False";
+static constexpr const char* kMacroEndLabelName = "__macro_end";
+static constexpr const char* kBreakLabelName = "__break";
+static constexpr const char* kContinueLabelName = "__continue";
+static constexpr const char* kCatchLabelName = "__catch";
+static constexpr const char* kNextCaseLabelName = "__NextCase";
template <class T>
std::vector<T*> FilterDeclarables(const std::vector<Declarable*> list) {
@@ -71,6 +76,9 @@ class Declarations {
static std::vector<Generic*> LookupGeneric(const std::string& name);
static Generic* LookupUniqueGeneric(const QualifiedName& name);
+ static GenericStructType* LookupUniqueGenericStructType(
+ const QualifiedName& name);
+
static Namespace* DeclareNamespace(const std::string& name);
static TypeAlias* DeclareType(const Identifier* name, const Type* type);
@@ -124,6 +132,8 @@ class Declarations {
static Generic* DeclareGeneric(const std::string& name,
GenericDeclaration* generic);
+ static GenericStructType* DeclareGenericStructType(const std::string& name,
+ StructDeclaration* decl);
template <class T>
static T* Declare(const std::string& name, T* d) {
diff --git a/deps/v8/src/torque/earley-parser.cc b/deps/v8/src/torque/earley-parser.cc
index ff12d4a449..9ebb132c82 100644
--- a/deps/v8/src/torque/earley-parser.cc
+++ b/deps/v8/src/torque/earley-parser.cc
@@ -159,26 +159,21 @@ Symbol* Lexer::MatchToken(InputPosition* pos, InputPosition end) {
symbol = &pair.second;
}
}
- // Check if matched pattern coincides with a keyword. Prefer the keyword in
- // this case.
- if (*pos != token_start) {
- auto found_keyword = keywords_.find(std::string(token_start, *pos));
- if (found_keyword != keywords_.end()) {
- return &found_keyword->second;
- }
- return symbol;
- }
- // Now check for a keyword (that doesn't overlap with a pattern).
- // Iterate from the end to ensure that if one keyword is a prefix of another,
- // we first try to match the longer one.
+ size_t pattern_size = *pos - token_start;
+
+ // Now check for keywords. Prefer keywords over patterns unless the pattern is
+ // longer. Iterate from the end to ensure that if one keyword is a prefix of
+ // another, we first try to match the longer one.
for (auto it = keywords_.rbegin(); it != keywords_.rend(); ++it) {
const std::string& keyword = it->first;
- if (static_cast<size_t>(end - *pos) < keyword.size()) continue;
- if (keyword == std::string(*pos, *pos + keyword.size())) {
- *pos += keyword.size();
+ if (static_cast<size_t>(end - token_start) < keyword.size()) continue;
+ if (keyword.size() >= pattern_size &&
+ keyword == std::string(token_start, token_start + keyword.size())) {
+ *pos = token_start + keyword.size();
return &it->second;
}
}
+ if (pattern_size > 0) return symbol;
return nullptr;
}
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index 9d9cfb02c0..d3d0c89c42 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -53,6 +53,8 @@ enum class ParseResultHolderBase::TypeId {
kLabelBlockPtr,
kOptionalLabelBlockPtr,
kNameAndTypeExpression,
+ kImplicitParameters,
+ kOptionalImplicitParameters,
kNameAndExpression,
kConditionalAnnotation,
kOptionalConditionalAnnotation,
@@ -70,8 +72,6 @@ enum class ParseResultHolderBase::TypeId {
kStdVectorOfExpressionPtr,
kExpressionWithSource,
kParameterList,
- kRangeExpression,
- kOptionalRangeExpression,
kTypeList,
kOptionalTypeList,
kLabelAndTypes,
diff --git a/deps/v8/src/torque/global-context.cc b/deps/v8/src/torque/global-context.cc
new file mode 100644
index 0000000000..f258f18474
--- /dev/null
+++ b/deps/v8/src/torque/global-context.cc
@@ -0,0 +1,24 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/global-context.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+GlobalContext::GlobalContext(Ast ast)
+ : collect_language_server_data_(false),
+ force_assert_statements_(false),
+ ast_(std::move(ast)) {
+ CurrentScope::Scope current_scope(nullptr);
+ CurrentSourcePosition::Scope current_source_position(
+ SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
+ default_namespace_ =
+ RegisterDeclarable(base::make_unique<Namespace>(kBaseNamespaceName));
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index aa70b23fb5..e103a22575 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -7,9 +7,9 @@
#include <map>
+#include "src/torque/ast.h"
+#include "src/torque/contextual.h"
#include "src/torque/declarable.h"
-#include "src/torque/declarations.h"
-#include "src/torque/type-oracle.h"
namespace v8 {
namespace internal {
@@ -19,16 +19,8 @@ class GlobalContext : public ContextualClass<GlobalContext> {
public:
GlobalContext(GlobalContext&&) V8_NOEXCEPT = default;
GlobalContext& operator=(GlobalContext&&) V8_NOEXCEPT = default;
- explicit GlobalContext(Ast ast)
- : collect_language_server_data_(false),
- force_assert_statements_(false),
- ast_(std::move(ast)) {
- CurrentScope::Scope current_scope(nullptr);
- CurrentSourcePosition::Scope current_source_position(
- SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
- default_namespace_ =
- RegisterDeclarable(base::make_unique<Namespace>(kBaseNamespaceName));
- }
+ explicit GlobalContext(Ast ast);
+
static Namespace* GetDefaultNamespace() { return Get().default_namespace_; }
template <class T>
T* RegisterDeclarable(std::unique_ptr<T> d) {
@@ -41,16 +33,6 @@ class GlobalContext : public ContextualClass<GlobalContext> {
return Get().declarables_;
}
- static const std::vector<Namespace*> GetNamespaces() {
- std::vector<Namespace*> result;
- for (auto& declarable : AllDeclarables()) {
- if (Namespace* n = Namespace::DynamicCast(declarable.get())) {
- result.push_back(n);
- }
- }
- return result;
- }
-
static void RegisterClass(const TypeAlias* alias) {
DCHECK(alias->ParentScope()->IsNamespace());
Get().classes_.push_back(alias);
@@ -82,6 +64,14 @@ class GlobalContext : public ContextualClass<GlobalContext> {
static Ast* ast() { return &Get().ast_; }
static size_t FreshId() { return Get().fresh_id_++; }
+ struct PerFileStreams {
+ std::stringstream csa_headerfile;
+ std::stringstream csa_ccfile;
+ };
+ static PerFileStreams& GeneratedPerFile(SourceId file) {
+ return Get().generated_per_file_[file];
+ }
+
private:
bool collect_language_server_data_;
bool force_assert_statements_;
@@ -89,6 +79,7 @@ class GlobalContext : public ContextualClass<GlobalContext> {
Ast ast_;
std::vector<std::unique_ptr<Declarable>> declarables_;
std::vector<std::string> cpp_includes_;
+ std::map<SourceId, PerFileStreams> generated_per_file_;
GlobalClassList classes_;
size_t fresh_id_ = 0;
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index d4798b28cb..a0aeeee81b 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -48,61 +48,62 @@ const Type* ImplementationVisitor::Visit(Statement* stmt) {
return result;
}
-void ImplementationVisitor::BeginNamespaceFile(Namespace* nspace) {
- std::ostream& source = nspace->source_stream();
- std::ostream& header = nspace->header_stream();
+void ImplementationVisitor::BeginCSAFiles() {
+ for (SourceId file : SourceFileMap::AllSources()) {
+ std::ostream& source = GlobalContext::GeneratedPerFile(file).csa_ccfile;
+ std::ostream& header = GlobalContext::GeneratedPerFile(file).csa_headerfile;
- for (const std::string& include_path : GlobalContext::CppIncludes()) {
- source << "#include " << StringLiteralQuote(include_path) << "\n";
- }
+ for (const std::string& include_path : GlobalContext::CppIncludes()) {
+ source << "#include " << StringLiteralQuote(include_path) << "\n";
+ }
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- source << "#include \"torque-generated/builtins-" +
- DashifyString(n->name()) + "-gen-tq.h\"\n";
- }
- source << "\n";
+ for (SourceId file : SourceFileMap::AllSources()) {
+ source << "#include \"torque-generated/" +
+ SourceFileMap::PathFromV8RootWithoutExtension(file) +
+ "-tq-csa.h\"\n";
+ }
+ source << "\n";
- source << "namespace v8 {\n"
- << "namespace internal {\n"
- << "\n";
+ source << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
- std::string upper_name(nspace->name());
- transform(upper_name.begin(), upper_name.end(), upper_name.begin(),
- ::toupper);
- std::string headerDefine =
- "V8_GEN_TORQUE_GENERATED_" + upper_name + "_NAMESPACE_TQ_H_";
- header << "#ifndef " << headerDefine << "\n";
- header << "#define " << headerDefine << "\n\n";
- header << "#include \"src/compiler/code-assembler.h\"\n";
- header << "#include \"src/codegen/code-stub-assembler.h\"\n";
- header << "#include \"src/utils/utils.h\"\n";
- header << "#include \"torque-generated/field-offsets-tq.h\"\n";
- header << "#include \"torque-generated/csa-types-tq.h\"\n";
- header << "\n";
+ std::string headerDefine =
+ "V8_GEN_TORQUE_GENERATED_" +
+ UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
+ header << "#ifndef " << headerDefine << "\n";
+ header << "#define " << headerDefine << "\n\n";
+ header << "#include \"src/compiler/code-assembler.h\"\n";
+ header << "#include \"src/codegen/code-stub-assembler.h\"\n";
+ header << "#include \"src/utils/utils.h\"\n";
+ header << "#include \"torque-generated/field-offsets-tq.h\"\n";
+ header << "#include \"torque-generated/csa-types-tq.h\"\n";
+ header << "\n";
- header << "namespace v8 {\n"
- << "namespace internal {\n"
- << "\n";
+ header << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
+ }
}
-void ImplementationVisitor::EndNamespaceFile(Namespace* nspace) {
- std::ostream& source = nspace->source_stream();
- std::ostream& header = nspace->header_stream();
+void ImplementationVisitor::EndCSAFiles() {
+ for (SourceId file : SourceFileMap::AllSources()) {
+ std::ostream& source = GlobalContext::GeneratedPerFile(file).csa_ccfile;
+ std::ostream& header = GlobalContext::GeneratedPerFile(file).csa_headerfile;
- std::string upper_name(nspace->name());
- transform(upper_name.begin(), upper_name.end(), upper_name.begin(),
- ::toupper);
- std::string headerDefine =
- "V8_GEN_TORQUE_GENERATED_" + upper_name + "_NAMESPACE_V8_H_";
+ std::string headerDefine =
+ "V8_GEN_TORQUE_GENERATED_" +
+ UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
- source << "} // namespace internal\n"
- << "} // namespace v8\n"
- << "\n";
+ source << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
- header << "} // namespace internal\n"
- << "} // namespace v8\n"
- << "\n";
- header << "#endif // " << headerDefine << "\n";
+ header << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+ header << "#endif // " << headerDefine << "\n";
+ }
}
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
@@ -179,14 +180,15 @@ VisitResult ImplementationVisitor::InlineMacro(
DCHECK(macro->IsMethod());
LocalValue this_value = LocalValue{!this_reference->IsVariableAccess(),
this_reference->GetVisitResult()};
- parameter_bindings.Add(kThisParameterName, this_value);
+ parameter_bindings.Add(kThisParameterName, this_value, true);
}
size_t i = 0;
for (auto arg : arguments) {
if (this_reference && i == signature.implicit_count) i++;
+ const bool mark_as_used = signature.implicit_count > i;
const Identifier* name = macro->parameter_names()[i++];
- parameter_bindings.Add(name, LocalValue{true, arg});
+ parameter_bindings.Add(name, LocalValue{true, arg}, mark_as_used);
}
DCHECK_EQ(label_blocks.size(), signature.labels.size());
@@ -217,7 +219,7 @@ VisitResult ImplementationVisitor::InlineMacro(
}
}
macro_end = assembler().NewBlock(std::move(stack));
- macro_end_binding.emplace(&LabelBindingsManager::Get(), "_macro_end",
+ macro_end_binding.emplace(&LabelBindingsManager::Get(), kMacroEndLabelName,
LocalLabel{macro_end, {return_type}});
} else {
SetReturnValue(VisitResult::NeverResult());
@@ -380,13 +382,15 @@ namespace {
std::string AddParameter(size_t i, Builtin* builtin,
Stack<std::string>* parameters,
Stack<const Type*>* parameter_types,
- BlockBindings<LocalValue>* parameter_bindings) {
+ BlockBindings<LocalValue>* parameter_bindings,
+ bool mark_as_used) {
const Identifier* name = builtin->signature().parameter_names[i];
const Type* type = builtin->signature().types()[i];
std::string external_name = "parameter" + std::to_string(i);
parameters->Push(external_name);
StackRange range = parameter_types->PushMany(LowerType(type));
- parameter_bindings->Add(name, LocalValue{true, VisitResult(type, range)});
+ parameter_bindings->Add(name, LocalValue{true, VisitResult(type, range)},
+ mark_as_used);
return external_name;
}
@@ -395,15 +399,15 @@ std::string AddParameter(size_t i, Builtin* builtin,
void ImplementationVisitor::Visit(Builtin* builtin) {
if (builtin->IsExternal()) return;
CurrentScope::Scope current_scope(builtin);
+ CurrentCallable::Scope current_callable(builtin);
+ CurrentReturnValue::Scope current_return_value;
+
const std::string& name = builtin->ExternalName();
const Signature& signature = builtin->signature();
source_out() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
<< " compiler::CodeAssemblerState* state_ = state();"
<< " compiler::CodeAssembler ca_(state());\n";
- CurrentCallable::Scope current_callable(builtin);
- CurrentReturnValue::Scope current_return_value;
-
Stack<const Type*> parameter_types;
Stack<std::string> parameters;
@@ -411,58 +415,128 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
- // Context
- std::string parameter0 = AddParameter(0, builtin, &parameters,
- &parameter_types, &parameter_bindings);
- source_out() << " TNode<Context> " << parameter0
- << " = UncheckedCast<Context>(Parameter("
- << "Descriptor::kContext));\n";
- source_out() << " USE(" << parameter0 << ");\n";
-
- size_t first = 1;
- if (builtin->IsVarArgsJavaScript()) {
- DCHECK(signature.parameter_types.var_args);
- source_out()
- << " Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
- std::string parameter1 = AddParameter(
- 1, builtin, &parameters, &parameter_types, &parameter_bindings);
- source_out()
- << " TNode<IntPtrT> arguments_length(ChangeInt32ToIntPtr(argc));\n";
- source_out() << " TNode<RawPtrT> arguments_frame = "
- "UncheckedCast<RawPtrT>(LoadFramePointer());\n";
- source_out() << " TorqueStructArguments "
- "torque_arguments(GetFrameArguments(arguments_frame, "
- "arguments_length));\n";
- source_out() << " CodeStubArguments arguments(this, torque_arguments);\n";
-
- source_out() << " TNode<Object> " << parameter1
- << " = arguments.GetReceiver();\n";
- source_out() << "USE(" << parameter1 << ");\n";
- parameters.Push("torque_arguments.frame");
- parameters.Push("torque_arguments.base");
- parameters.Push("torque_arguments.length");
- const Type* arguments_type = TypeOracle::GetArgumentsType();
- StackRange range = parameter_types.PushMany(LowerType(arguments_type));
- parameter_bindings.Add(
- *signature.arguments_variable,
- LocalValue{true, VisitResult(arguments_type, range)});
-
- first = 2;
- }
-
- for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
- if (i < first) continue;
- const std::string& parameter_name = signature.parameter_names[i]->value;
- const Type* type = signature.types()[i];
- std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
- &parameter_bindings);
- source_out() << " " << type->GetGeneratedTypeName() << " " << var << " = "
- << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
- << ">(Parameter(Descriptor::k"
- << CamelifyString(parameter_name) << "));\n";
- source_out() << " USE(" << var << ");\n";
- }
+ if (builtin->IsVarArgsJavaScript() || builtin->IsFixedArgsJavaScript()) {
+ if (builtin->IsVarArgsJavaScript()) {
+ DCHECK(signature.parameter_types.var_args);
+ if (signature.ExplicitCount() > 0) {
+ Error("Cannot mix explicit parameters with varargs.")
+ .Position(signature.parameter_names[signature.implicit_count]->pos);
+ }
+ source_out()
+ << " Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
+ source_out()
+ << " TNode<IntPtrT> arguments_length(ChangeInt32ToIntPtr(argc));\n";
+ source_out() << " TNode<RawPtrT> arguments_frame = "
+ "UncheckedCast<RawPtrT>(LoadFramePointer());\n";
+ source_out() << " TorqueStructArguments "
+ "torque_arguments(GetFrameArguments(arguments_frame, "
+ "arguments_length));\n";
+ source_out()
+ << " CodeStubArguments arguments(this, torque_arguments);\n";
+
+ parameters.Push("torque_arguments.frame");
+ parameters.Push("torque_arguments.base");
+ parameters.Push("torque_arguments.length");
+ const Type* arguments_type = TypeOracle::GetArgumentsType();
+ StackRange range = parameter_types.PushMany(LowerType(arguments_type));
+ parameter_bindings.Add(
+ *signature.arguments_variable,
+ LocalValue{true, VisitResult(arguments_type, range)}, true);
+ }
+
+ for (size_t i = 0; i < signature.implicit_count; ++i) {
+ const std::string& param_name = signature.parameter_names[i]->value;
+ SourcePosition param_pos = signature.parameter_names[i]->pos;
+ std::string generated_name = AddParameter(
+ i, builtin, &parameters, &parameter_types, &parameter_bindings, true);
+ const Type* actual_type = signature.parameter_types.types[i];
+ const Type* expected_type;
+ if (param_name == "context") {
+ source_out() << " TNode<Context> " << generated_name
+ << " = UncheckedCast<Context>(Parameter("
+ << "Descriptor::kContext));\n";
+ source_out() << " USE(" << generated_name << ");\n";
+ expected_type = TypeOracle::GetContextType();
+ } else if (param_name == "receiver") {
+ source_out()
+ << " TNode<Object> " << generated_name << " = "
+ << (builtin->IsVarArgsJavaScript()
+ ? "arguments.GetReceiver()"
+ : "UncheckedCast<Object>(Parameter(Descriptor::kReceiver))")
+ << ";\n";
+ source_out() << "USE(" << generated_name << ");\n";
+ expected_type = TypeOracle::GetObjectType();
+ } else if (param_name == "newTarget") {
+ source_out() << " TNode<Object> " << generated_name
+ << " = UncheckedCast<Object>(Parameter("
+ << "Descriptor::kJSNewTarget));\n";
+ source_out() << "USE(" << generated_name << ");\n";
+ expected_type = TypeOracle::GetObjectType();
+ } else if (param_name == "target") {
+ source_out() << " TNode<JSFunction> " << generated_name
+ << " = UncheckedCast<JSFunction>(Parameter("
+ << "Descriptor::kJSTarget));\n";
+ source_out() << "USE(" << generated_name << ");\n";
+ expected_type = TypeOracle::GetJSFunctionType();
+ } else {
+ Error(
+ "Unexpected implicit parameter \"", param_name,
+ "\" for JavaScript calling convention, "
+ "expected \"context\", \"receiver\", \"target\", or \"newTarget\"")
+ .Position(param_pos);
+ expected_type = actual_type;
+ }
+ if (actual_type != expected_type) {
+ Error("According to JavaScript calling convention, expected parameter ",
+ param_name, " to have type ", *expected_type, " but found type ",
+ *actual_type)
+ .Position(param_pos);
+ }
+ }
+
+ for (size_t i = signature.implicit_count;
+ i < signature.parameter_names.size(); ++i) {
+ const std::string& parameter_name = signature.parameter_names[i]->value;
+ const Type* type = signature.types()[i];
+ const bool mark_as_used = signature.implicit_count > i;
+ std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
+ &parameter_bindings, mark_as_used);
+ source_out() << " " << type->GetGeneratedTypeName() << " " << var
+ << " = "
+ << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
+ << ">(Parameter(Descriptor::k"
+ << CamelifyString(parameter_name) << "));\n";
+ source_out() << " USE(" << var << ");\n";
+ }
+
+ } else {
+ DCHECK(builtin->IsStub());
+
+ // Context
+ const bool context_is_implicit = signature.implicit_count > 0;
+ std::string parameter0 =
+ AddParameter(0, builtin, &parameters, &parameter_types,
+ &parameter_bindings, context_is_implicit);
+ source_out() << " TNode<Context> " << parameter0
+ << " = UncheckedCast<Context>(Parameter("
+ << "Descriptor::kContext));\n";
+ source_out() << " USE(" << parameter0 << ");\n";
+
+ for (size_t i = 1; i < signature.parameter_names.size(); ++i) {
+ const std::string& parameter_name = signature.parameter_names[i]->value;
+ const Type* type = signature.types()[i];
+ const bool mark_as_used = signature.implicit_count > i;
+ std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
+ &parameter_bindings, mark_as_used);
+ source_out() << " " << type->GetGeneratedTypeName() << " " << var
+ << " = "
+ << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
+ << ">(Parameter(Descriptor::k"
+ << CamelifyString(parameter_name) << "));\n";
+ source_out() << " USE(" << var << ");\n";
+ }
+ }
assembler_ = CfgAssembler(parameter_types);
const Type* body_result = Visit(*builtin->body());
if (body_result != TypeOracle::GetNeverType()) {
@@ -961,6 +1035,26 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
"Torque assert '" + FormatAssertSource(stmt->source) + "' failed"});
assembler().Bind(true_block);
+ } else {
+ // Visit the expression so bindings only used in asserts are marked
+ // as such. Otherwise they might be wrongly reported as unused bindings
+ // in release builds.
+ stmt->expression->VisitAllSubExpressions([](Expression* expression) {
+ if (auto id = IdentifierExpression::DynamicCast(expression)) {
+ ValueBindingsManager::Get().TryLookup(id->name->value);
+ } else if (auto call = CallExpression::DynamicCast(expression)) {
+ for (Identifier* label : call->labels) {
+ LabelBindingsManager::Get().TryLookup(label->value);
+ }
+ // TODO(szuend): In case the call expression resolves to a macro
+ // callable, mark the macro as used as well.
+ } else if (auto call = CallMethodExpression::DynamicCast(expression)) {
+ for (Identifier* label : call->labels) {
+ LabelBindingsManager::Get().TryLookup(label->value);
+ }
+ // TODO(szuend): Mark the underlying macro as used.
+ }
+ });
}
return TypeOracle::GetVoidType();
}
@@ -978,7 +1072,7 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
ReportError(s.str());
}
LocalLabel* end =
- current_callable->IsMacro() ? LookupLabel("_macro_end") : nullptr;
+ current_callable->IsMacro() ? LookupLabel(kMacroEndLabelName) : nullptr;
if (current_callable->HasReturnValue()) {
if (!stmt->value) {
std::stringstream s;
@@ -1016,81 +1110,6 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
return TypeOracle::GetNeverType();
}
-const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
- VisitResult expression_result = Visit(stmt->iterable);
- VisitResult begin = stmt->begin
- ? Visit(*stmt->begin)
- : VisitResult(TypeOracle::GetConstInt31Type(), "0");
-
- VisitResult end = stmt->end
- ? Visit(*stmt->end)
- : GenerateCall(".length", {{expression_result}, {}});
-
- const Type* common_type = GetCommonType(begin.type(), end.type());
- VisitResult index = GenerateImplicitConvert(common_type, begin);
-
- Block* body_block = assembler().NewBlock();
- Block* increment_block = assembler().NewBlock(assembler().CurrentStack());
- Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
-
- Block* header_block = assembler().NewBlock();
-
- assembler().Goto(header_block);
-
- assembler().Bind(header_block);
-
- BreakContinueActivator activator(exit_block, increment_block);
-
- {
- StackScope comparison_scope(this);
- VisitResult result = GenerateCall("<", {{index, end}, {}});
- if (result.type() != TypeOracle::GetBoolType()) {
- ReportError("operator < with arguments(", *index.type(), ", ",
- *end.type(),
- ") used in for-of loop has to return type bool, but "
- "returned type ",
- *result.type());
- }
- comparison_scope.Yield(result);
- }
- assembler().Branch(body_block, exit_block);
-
- assembler().Bind(body_block);
- {
- VisitResult element_result;
- {
- StackScope element_scope(this);
- VisitResult result = GenerateCall("[]", {{expression_result, index}, {}});
- if (stmt->var_declaration->type) {
- const Type* declared_type =
- TypeVisitor::ComputeType(*stmt->var_declaration->type);
- result = GenerateImplicitConvert(declared_type, result);
- }
- element_result = element_scope.Yield(result);
- }
- Binding<LocalValue> element_var_binding{&ValueBindingsManager::Get(),
- stmt->var_declaration->name->value,
- LocalValue{true, element_result}};
- Visit(stmt->body);
- }
- assembler().Goto(increment_block);
-
- assembler().Bind(increment_block);
- {
- Arguments increment_args;
- increment_args.parameters = {index, {TypeOracle::GetConstInt31Type(), "1"}};
- VisitResult increment_result = GenerateCall("+", increment_args);
-
- GenerateAssignToLocation(LocationReference::VariableAccess(index),
- increment_result);
- }
-
- assembler().Goto(header_block);
-
- assembler().Bind(exit_block);
- return TypeOracle::GetVoidType();
-}
-
VisitResult ImplementationVisitor::TemporaryUninitializedStruct(
const StructType* struct_type, const std::string& reason) {
StackRange range = assembler().TopRange(0);
@@ -1346,43 +1365,51 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
InitializerResults initializer_results =
VisitInitializerResults(class_type, expr->initializers);
- // Output the code to generate an uninitialized object of the class size in
- // the GC heap.
- VisitResult allocate_result;
+ VisitResult object_map;
+ const Field& map_field = class_type->LookupField("map");
+ if (map_field.offset != 0) {
+ ReportError("class initializers must have a map as first parameter");
+ }
+ const std::map<std::string, VisitResult>& initializer_fields =
+ initializer_results.field_value_map;
+ auto it_object_map = initializer_fields.find(map_field.name_and_type.name);
if (class_type->IsExtern()) {
- const Field& map_field = class_type->LookupField("map");
- if (map_field.offset != 0) {
- ReportError(
- "external classes initializers must have a map as first parameter");
- }
- NameValueMap initializer_fields = initializer_results.field_value_map;
- if (initializer_fields.find(map_field.name_and_type.name) ==
- initializer_fields.end()) {
+ if (it_object_map == initializer_fields.end()) {
ReportError("Constructor for ", class_type->name(),
" needs Map argument!");
}
- VisitResult object_map = initializer_fields[map_field.name_and_type.name];
- Arguments size_arguments;
- size_arguments.parameters.push_back(object_map);
- VisitResult object_size = GenerateCall("%GetAllocationBaseSize",
- size_arguments, {class_type}, false);
-
- object_size =
- AddVariableObjectSize(object_size, class_type, initializer_results);
-
- Arguments allocate_arguments;
- allocate_arguments.parameters.push_back(object_size);
- allocate_result =
- GenerateCall("%Allocate", allocate_arguments, {class_type}, false);
- DCHECK(allocate_result.IsOnStack());
+ object_map = it_object_map->second;
} else {
- Arguments allocate_arguments;
- allocate_arguments.parameters.push_back(
- VisitResult(TypeOracle::GetConstexprIntPtrType(),
- std::to_string(class_type->size() / kTaggedSize)));
- allocate_result = GenerateCall("%AllocateInternalClass", allocate_arguments,
- {class_type}, false);
- }
+ if (it_object_map != initializer_fields.end()) {
+ ReportError(
+ "Constructor for ", class_type->name(),
+ " must not specify Map argument; it is automatically inserted.");
+ }
+ Arguments get_struct_map_arguments;
+ get_struct_map_arguments.parameters.push_back(
+ VisitResult(TypeOracle::GetConstexprInstanceTypeType(),
+ CapifyStringWithUnderscores(class_type->name()) + "_TYPE"));
+ object_map =
+ GenerateCall("%GetStructMap", get_struct_map_arguments, {}, false);
+ CurrentSourcePosition::Scope current_pos(expr->pos);
+ initializer_results.names.insert(initializer_results.names.begin(),
+ MakeNode<Identifier>("map"));
+ initializer_results.field_value_map[map_field.name_and_type.name] =
+ object_map;
+ }
+ Arguments size_arguments;
+ size_arguments.parameters.push_back(object_map);
+ VisitResult object_size = GenerateCall("%GetAllocationBaseSize",
+ size_arguments, {class_type}, false);
+
+ object_size =
+ AddVariableObjectSize(object_size, class_type, initializer_results);
+
+ Arguments allocate_arguments;
+ allocate_arguments.parameters.push_back(object_size);
+ VisitResult allocate_result =
+ GenerateCall("%Allocate", allocate_arguments, {class_type}, false);
+ DCHECK(allocate_result.IsOnStack());
InitializeAggregate(class_type, allocate_result, initializer_results);
@@ -1390,7 +1417,8 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
}
const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
- base::Optional<Binding<LocalLabel>*> break_label = TryLookupLabel("_break");
+ base::Optional<Binding<LocalLabel>*> break_label =
+ TryLookupLabel(kBreakLabelName);
if (!break_label) {
ReportError("break used outside of loop");
}
@@ -1400,7 +1428,7 @@ const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
base::Optional<Binding<LocalLabel>*> continue_label =
- TryLookupLabel("_continue");
+ TryLookupLabel(kContinueLabelName);
if (!continue_label) {
ReportError("continue used outside of loop");
}
@@ -1466,17 +1494,21 @@ VisitResult ImplementationVisitor::Visit(SpreadExpression* expr) {
"initialization expressions");
}
-void ImplementationVisitor::GenerateImplementation(const std::string& dir,
- Namespace* nspace) {
- std::string new_source(nspace->source());
- std::string base_file_name =
- "builtins-" + DashifyString(nspace->name()) + "-gen-tq";
+void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
+ for (SourceId file : SourceFileMap::AllSources()) {
+ std::string path_from_root =
+ SourceFileMap::PathFromV8RootWithoutExtension(file);
- std::string source_file_name = dir + "/" + base_file_name + ".cc";
- WriteFile(source_file_name, new_source);
- std::string new_header(nspace->header());
- std::string header_file_name = dir + "/" + base_file_name + ".h";
- WriteFile(header_file_name, new_header);
+ std::string new_source(
+ GlobalContext::GeneratedPerFile(file).csa_ccfile.str());
+
+ std::string source_file_name = dir + "/" + path_from_root + "-tq-csa.cc";
+ WriteFile(source_file_name, new_source);
+ std::string new_header(
+ GlobalContext::GeneratedPerFile(file).csa_headerfile.str());
+ std::string header_file_name = dir + "/" + path_from_root + "-tq-csa.h";
+ WriteFile(header_file_name, new_header);
+ }
}
void ImplementationVisitor::GenerateMacroFunctionDeclaration(
@@ -1569,7 +1601,7 @@ void FailCallableLookup(const std::string& reason, const QualifiedName& name,
Callable* GetOrCreateSpecialization(const SpecializationKey& key) {
if (base::Optional<Callable*> specialization =
- key.generic->GetSpecialization(key.specialized_types)) {
+ key.generic->specializations().Get(key.specialized_types)) {
return *specialization;
}
return DeclarationVisitor::SpecializeImplicit(key);
@@ -1876,7 +1908,7 @@ LocationReference ImplementationVisitor::GetLocationReference(
return LocationReference::Temporary(
(*value)->value, "constant value " + expr->name->value);
}
- return LocationReference::VariableAccess((*value)->value);
+ return LocationReference::VariableAccess((*value)->value, *value);
}
}
@@ -1973,6 +2005,12 @@ void ImplementationVisitor::GenerateAssignToLocation(
GenerateImplicitConvert(variable.type(), assignment_value);
assembler().Poke(variable.stack_range(), converted_value.stack_range(),
variable.type());
+
+ // Local variables are detected by the existence of a binding. Assignment
+ // to local variables is recorded to support lint errors.
+ if (reference.binding()) {
+ (*reference.binding())->SetWritten();
+ }
} else if (reference.IsIndexedFieldAccess()) {
ReportError("assigning a value directly to an indexed field isn't allowed");
} else if (reference.IsHeapReference()) {
@@ -2167,6 +2205,7 @@ VisitResult ImplementationVisitor::GenerateCall(
if (is_tailcall) {
ReportError("can't tail call a macro");
}
+ macro->SetUsed();
if (return_type->IsConstexpr()) {
DCHECK_EQ(0, arguments.labels.size());
std::stringstream result;
@@ -2534,6 +2573,7 @@ std::string ImplementationVisitor::ExternalParameterName(
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::ValueBindingsManager)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::LabelBindingsManager)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentCallable)
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentFileStreams)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentReturnValue)
bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
@@ -2556,7 +2596,7 @@ bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
base::Optional<Block*> ImplementationVisitor::GetCatchBlock() {
base::Optional<Block*> catch_block;
if (base::Optional<Binding<LocalLabel>*> catch_handler =
- TryLookupLabel("_catch")) {
+ TryLookupLabel(kCatchLabelName)) {
catch_block = assembler().NewBlock(base::nullopt, true);
}
return catch_block;
@@ -2566,7 +2606,7 @@ void ImplementationVisitor::GenerateCatchBlock(
base::Optional<Block*> catch_block) {
if (catch_block) {
base::Optional<Binding<LocalLabel>*> catch_handler =
- TryLookupLabel("_catch");
+ TryLookupLabel(kCatchLabelName);
if (assembler().CurrentBlockIsComplete()) {
assembler().Bind(*catch_block);
assembler().Goto((*catch_handler)->block, 1);
@@ -2594,6 +2634,12 @@ void ImplementationVisitor::VisitAllDeclarables() {
void ImplementationVisitor::Visit(Declarable* declarable) {
CurrentScope::Scope current_scope(declarable->ParentScope());
CurrentSourcePosition::Scope current_source_position(declarable->Position());
+ CurrentFileStreams::Scope current_file_streams(
+ &GlobalContext::GeneratedPerFile(declarable->Position().source));
+ if (Callable* callable = Callable::DynamicCast(declarable)) {
+ if (!callable->ShouldGenerateExternalCode())
+ CurrentFileStreams::Get() = nullptr;
+ }
switch (declarable->kind()) {
case Declarable::kExternMacro:
return Visit(ExternMacro::cast(declarable));
@@ -2612,6 +2658,7 @@ void ImplementationVisitor::Visit(Declarable* declarable) {
case Declarable::kExternConstant:
case Declarable::kNamespace:
case Declarable::kGeneric:
+ case Declarable::kGenericStructType:
return;
}
}
@@ -2891,9 +2938,81 @@ class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
private:
std::ostream& out_;
};
-
} // namespace
+void ImplementationVisitor::GenerateInstanceTypes(
+ const std::string& output_directory) {
+ std::stringstream header;
+ std::string file_name = "instance-types-tq.h";
+ {
+ IncludeGuardScope include_guard(header, file_name);
+
+ header << "#define TORQUE_DEFINED_INSTANCE_TYPES(V) \\\n";
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (type->IsExtern()) continue;
+ std::string type_name =
+ CapifyStringWithUnderscores(type->name()) + "_TYPE";
+ header << " V(" << type_name << ") \\\n";
+ }
+ header << "\n\n";
+
+ header << "#define TORQUE_STRUCT_LIST_GENERATOR(V, _) \\\n";
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (type->IsExtern()) continue;
+ std::string type_name =
+ CapifyStringWithUnderscores(type->name()) + "_TYPE";
+ std::string variable_name = SnakeifyString(type->name());
+ header << " V(_, " << type_name << ", " << type->name() << ", "
+ << variable_name << ") \\\n";
+ }
+ header << "\n";
+ }
+ std::string output_header_path = output_directory + "/" + file_name;
+ WriteFile(output_header_path, header.str());
+}
+
+void ImplementationVisitor::GenerateCppForInternalClasses(
+ const std::string& output_directory) {
+ std::stringstream header;
+ std::stringstream inl;
+ std::string base_name = "internal-class-definitions-tq";
+ {
+ IncludeGuardScope header_guard(header, base_name + ".h");
+ header << "#include \"src/objects/objects.h\"\n";
+ header << "#include \"src/objects/struct.h\"\n";
+ header << "#include \"src/objects/js-objects.h\"\n";
+ header << "#include \"src/utils/utils.h\"\n";
+ header << "#include \"torque-generated/class-definitions-tq.h\"\n";
+ IncludeObjectMacrosScope header_macros(header);
+ NamespaceScope header_namespaces(header, {"v8", "internal"});
+
+ IncludeGuardScope inl_guard(inl, base_name + "-inl.h");
+ inl << "#include \"torque-generated/" << base_name << ".h\"\n";
+ inl << "#include \"torque-generated/class-definitions-tq-inl.h\"\n";
+ IncludeObjectMacrosScope inl_macros(inl);
+ NamespaceScope inl_namespaces(inl, {"v8", "internal"});
+
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (type->IsExtern()) continue;
+ const ClassType* super = type->GetSuperClass();
+ std::string parent = "TorqueGenerated" + type->name() + "<" +
+ type->name() + ", " + super->name() + ">";
+ header << "class " << type->name() << ": public " << parent << " {\n";
+ header << " public:\n";
+ header << " TQ_OBJECT_CONSTRUCTORS(" << type->name() << ")\n";
+ header << "};\n\n";
+
+ inl << "TQ_OBJECT_CONSTRUCTORS_IMPL(" << type->name() << ")\n";
+ }
+ }
+ std::string dir_basename = output_directory + "/" + base_name;
+ WriteFile(dir_basename + ".h", header.str());
+ WriteFile(dir_basename + "-inl.h", inl.str());
+}
+
void ImplementationVisitor::GenerateClassFieldOffsets(
const std::string& output_directory) {
std::stringstream header;
@@ -2903,7 +3022,6 @@ void ImplementationVisitor::GenerateClassFieldOffsets(
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
const ClassType* type = ClassType::DynamicCast(alias->type());
- if (!type->IsExtern()) continue;
// TODO(danno): Remove this once all classes use ClassFieldOffsetGenerator
// to generate field offsets without the use of macros.
@@ -2938,8 +3056,8 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
hdr_ << " static constexpr int " << field << " = " << previous_field_end_
<< ";\n";
hdr_ << " static constexpr int " << field_end << " = " << field << " + "
- << size_string << ";\n";
- previous_field_end_ = field_end;
+ << size_string << " - 1;\n";
+ previous_field_end_ = field_end + " + 1";
}
virtual void WriteMarker(const std::string& marker) {
hdr_ << " static constexpr int " << marker << " = " << previous_field_end_
@@ -3148,7 +3266,7 @@ void CppClassGenerator::GenerateFieldAccessorForSmi(const Field& f) {
// Generate implementation in inline header.
inl_ << "template <class D, class P>\n";
inl_ << type << " " << gen_name_ << "<D, P>::" << name << "() const {\n";
- inl_ << " return Smi::cast(READ_FIELD(*this, " << offset << "));\n";
+ inl_ << " return TaggedField<Smi, " << offset << ">::load(*this);\n";
inl_ << "}\n";
inl_ << "template <class D, class P>\n";
@@ -3173,6 +3291,7 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
hdr_ << " // Torque type: " << field_type->ToString() << "\n";
}
hdr_ << " inline " << type << " " << name << "() const;\n";
+ hdr_ << " inline " << type << " " << name << "(Isolate* isolate) const;\n";
hdr_ << " inline void set_" << name << "(" << type
<< " value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);\n\n";
@@ -3185,10 +3304,20 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
// Generate implementation in inline header.
inl_ << "template <class D, class P>\n";
inl_ << type << " " << gen_name_ << "<D, P>::" << name << "() const {\n";
- inl_ << " Object value = READ_FIELD(*this, " << offset << ");\n";
+ inl_ << " Isolate* isolate = GetIsolateForPtrCompr(*this);\n";
+ inl_ << " return " << gen_name_ << "::" << name << "(isolate);\n";
+ inl_ << "}\n";
+
+ inl_ << "template <class D, class P>\n";
+ inl_ << type << " " << gen_name_ << "<D, P>::" << name
+ << "(Isolate* isolate) const {\n";
if (class_type) {
- inl_ << " return " << type << "::cast(value);\n";
+ inl_ << " return TaggedField<" << type << ", " << offset
+ << ">::load(isolate, *this);\n";
} else {
+ // TODO(tebbi): load value as HeapObject when possible
+ inl_ << " Object value = TaggedField<Object, " << offset
+ << ">::load(isolate, *this);\n";
inl_ << " DCHECK(" << type_check << ");\n";
inl_ << " return value;\n";
}
@@ -3235,14 +3364,18 @@ void ImplementationVisitor::GenerateClassDefinitions(
<< "#include \"torque-generated/class-definitions-tq.h\"\n\n";
implementation << "#include \"torque-generated/class-verifiers-tq.h\"\n\n";
implementation << "#include \"src/objects/struct-inl.h\"\n\n";
+ implementation
+ << "#include "
+ "\"torque-generated/internal-class-definitions-tq-inl.h\"\n\n";
NamespaceScope implementation_namespaces(implementation,
{"v8", "internal"});
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
const ClassType* type = ClassType::DynamicCast(alias->type());
- if (!type->GenerateCppClassDefinitions()) continue;
- CppClassGenerator g(type, header, inline_header, implementation);
- g.GenerateClass();
+ if (type->GenerateCppClassDefinitions()) {
+ CppClassGenerator g(type, header, inline_header, implementation);
+ g.GenerateClass();
+ }
}
}
WriteFile(file_basename + ".h", header.str());
@@ -3282,6 +3415,8 @@ void ImplementationVisitor::GeneratePrintDefinitions(
impl << "#include \"src/objects/objects.h\"\n\n";
impl << "#include <iosfwd>\n\n";
+ impl << "#include "
+ "\"torque-generated/internal-class-definitions-tq-inl.h\"\n";
impl << "#include \"src/objects/struct-inl.h\"\n\n";
impl << "#include \"src/objects/template-objects-inl.h\"\n\n";
@@ -3291,7 +3426,7 @@ void ImplementationVisitor::GeneratePrintDefinitions(
const ClassType* type = ClassType::DynamicCast(alias->type());
if (!type->ShouldGeneratePrint()) continue;
- if (type->IsExtern() && type->GenerateCppClassDefinitions()) {
+ if (type->GenerateCppClassDefinitions()) {
const ClassType* super = type->GetSuperClass();
std::string gen_name = "TorqueGenerated" + type->name();
std::string gen_name_T =
@@ -3319,8 +3454,10 @@ void GenerateClassFieldVerifier(const std::string& class_name,
if (!f.generate_verify) return;
const Type* field_type = f.name_and_type.type;
- // We only verify tagged types, not raw numbers or pointers.
- if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) return;
+ // We only verify tagged types, not raw numbers or pointers. Note that this
+ // must check against GetObjectType not GetTaggedType, because Uninitialized
+ // is a Tagged but should not be verified.
+ if (!field_type->IsSubtypeOf(TypeOracle::GetObjectType())) return;
if (f.index) {
if ((*f.index)->name_and_type.type != TypeOracle::GetSmiType()) {
@@ -3328,26 +3465,24 @@ void GenerateClassFieldVerifier(const std::string& class_name,
}
// We already verified the index field because it was listed earlier, so we
// can assume it's safe to read here.
- cc_contents << " for (int i = 0; i < Smi::ToInt(READ_FIELD(o, "
- << class_name << "::k"
- << CamelifyString((*f.index)->name_and_type.name)
- << "Offset)); ++i) {\n";
+ cc_contents << " for (int i = 0; i < TaggedField<Smi, " << class_name
+ << "::k" << CamelifyString((*f.index)->name_and_type.name)
+ << "Offset>::load(o).value(); ++i) {\n";
} else {
cc_contents << " {\n";
}
const char* object_type = f.is_weak ? "MaybeObject" : "Object";
- const char* read_fn = f.is_weak ? "READ_WEAK_FIELD" : "READ_FIELD";
const char* verify_fn =
f.is_weak ? "VerifyMaybeObjectPointer" : "VerifyPointer";
- const char* index_offset = f.index ? " + i * kTaggedSize" : "";
+ const char* index_offset = f.index ? "i * kTaggedSize" : "0";
// Name the local var based on the field name for nicer CHECK output.
- const std::string value = f.name_and_type.name + "_value";
+ const std::string value = f.name_and_type.name + "__value";
// Read the field.
- cc_contents << " " << object_type << " " << value << " = " << read_fn
- << "(o, " << class_name << "::k"
- << CamelifyString(f.name_and_type.name) << "Offset"
+ cc_contents << " " << object_type << " " << value << " = TaggedField<"
+ << object_type << ", " << class_name << "::k"
+ << CamelifyString(f.name_and_type.name) << "Offset>::load(o, "
<< index_offset << ");\n";
// Call VerifyPointer or VerifyMaybeObjectPointer on it.
@@ -3365,16 +3500,6 @@ void GenerateClassFieldVerifier(const std::string& class_name,
if (!type_check.empty()) type_check += " || ";
type_check += strong_value + ".Is" + runtime_type + "()";
}
- // Many subtypes of JSObject can be verified in partially-initialized states
- // where their fields are all undefined. We explicitly allow that here. For
- // any such fields that should never be undefined, we can include extra code
- // in the custom verifier functions for them.
- // TODO(1240798): If Factory::InitializeJSObjectFromMap is updated to use
- // correct initial values based on the type of the field, then make this
- // check stricter too.
- if (class_type.IsSubtypeOf(TypeOracle::GetJSObjectType())) {
- type_check += " || " + strong_value + ".IsUndefined(isolate)";
- }
cc_contents << " CHECK(" << type_check << ");\n";
}
cc_contents << " }\n";
@@ -3398,6 +3523,8 @@ void ImplementationVisitor::GenerateClassVerifiers(
cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
}
cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
+ cc_contents << "#include "
+ "\"torque-generated/internal-class-definitions-tq-inl.h\"\n";
IncludeObjectMacrosScope object_macros(cc_contents);
@@ -3408,7 +3535,7 @@ void ImplementationVisitor::GenerateClassVerifiers(
h_contents << "class Isolate;\n";
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
const ClassType* type = ClassType::DynamicCast(alias->type());
- if (!type->IsExtern() || !type->ShouldGenerateVerify()) continue;
+ if (!type->ShouldGenerateVerify()) continue;
h_contents << "class " << type->name() << ";\n";
}
@@ -3420,7 +3547,7 @@ void ImplementationVisitor::GenerateClassVerifiers(
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
const ClassType* type = ClassType::DynamicCast(alias->type());
std::string name = type->name();
- if (!type->IsExtern() || !type->ShouldGenerateVerify()) continue;
+ if (!type->ShouldGenerateVerify()) continue;
std::string method_name = name + "Verify";
@@ -3483,10 +3610,14 @@ void ImplementationVisitor::GenerateExportedMacrosAssembler(
h_contents << "#include \"src/compiler/code-assembler.h\"\n";
h_contents << "#include \"src/execution/frames.h\"\n";
h_contents << "#include \"torque-generated/csa-types-tq.h\"\n";
+ h_contents
+ << "#include \"torque-generated/internal-class-definitions-tq.h\"\n";
cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- cc_contents << "#include \"torque-generated/builtins-" +
- DashifyString(n->name()) + "-gen-tq.h\"\n";
+
+ for (SourceId file : SourceFileMap::AllSources()) {
+ cc_contents << "#include \"torque-generated/" +
+ SourceFileMap::PathFromV8RootWithoutExtension(file) +
+ "-tq-csa.h\"\n";
}
NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
@@ -3541,13 +3672,13 @@ void ImplementationVisitor::GenerateCSATypes(
NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
- for (auto& declarable : GlobalContext::AllDeclarables()) {
- TypeAlias* alias = TypeAlias::DynamicCast(declarable.get());
- if (!alias || alias->IsRedeclaration()) continue;
- const StructType* struct_type = StructType::DynamicCast(alias->type());
+ // Generates headers for all structs in a topologically-sorted order, since
+ // TypeOracle keeps them in the order of their resolution.
+ for (auto& type : *TypeOracle::GetAggregateTypes()) {
+ const StructType* struct_type = StructType::DynamicCast(type.get());
if (!struct_type) continue;
- const std::string& name = struct_type->name();
- h_contents << "struct TorqueStruct" << name << " {\n";
+ h_contents << "struct " << struct_type->GetGeneratedTypeNameImpl()
+ << " {\n";
for (auto& field : struct_type->fields()) {
h_contents << " " << field.name_and_type.type->GetGeneratedTypeName();
h_contents << " " << field.name_and_type.name << ";\n";
@@ -3583,6 +3714,33 @@ void ImplementationVisitor::GenerateCSATypes(
WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
}
+void ReportAllUnusedMacros() {
+ for (const auto& declarable : GlobalContext::AllDeclarables()) {
+ if (!declarable->IsMacro() || declarable->IsExternMacro()) continue;
+
+ Macro* macro = Macro::cast(declarable.get());
+ if (macro->IsUsed()) continue;
+
+ if (macro->IsTorqueMacro() && TorqueMacro::cast(macro)->IsExportedToCSA()) {
+ continue;
+ }
+
+ std::vector<std::string> ignored_prefixes = {"Convert<", "Cast<",
+ "FromConstexpr<"};
+ const std::string name = macro->ReadableName();
+ const bool ignore =
+ std::any_of(ignored_prefixes.begin(), ignored_prefixes.end(),
+ [&name](const std::string& prefix) {
+ return StringStartsWith(name, prefix);
+ });
+
+ if (!ignore) {
+ Lint("Macro '", macro->ReadableName(), "' is never used.")
+ .Position(macro->IdentifierPosition());
+ }
+ }
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index e79c768e5c..a572ebb936 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -10,6 +10,7 @@
#include "src/base/macros.h"
#include "src/torque/ast.h"
#include "src/torque/cfg.h"
+#include "src/torque/declarations.h"
#include "src/torque/global-context.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"
@@ -18,6 +19,10 @@ namespace v8 {
namespace internal {
namespace torque {
+template <typename T>
+class Binding;
+struct LocalValue;
+
// LocationReference is the representation of an l-value, i.e., a value that might
// allow for assignment. For uniformity, this class can also represent
// unassignable temporaries. Assignable values fall in two categories:
@@ -26,10 +31,13 @@ namespace torque {
class LocationReference {
public:
// An assignable stack range.
- static LocationReference VariableAccess(VisitResult variable) {
+ static LocationReference VariableAccess(
+ VisitResult variable,
+ base::Optional<Binding<LocalValue>*> binding = base::nullopt) {
DCHECK(variable.IsOnStack());
LocationReference result;
result.variable_ = std::move(variable);
+ result.binding_ = binding;
return result;
}
// An unassignable value. {description} is only used for error messages.
@@ -145,6 +153,10 @@ class LocationReference {
DCHECK(IsCallAccess());
return *assign_function_;
}
+ base::Optional<Binding<LocalValue>*> binding() const {
+ DCHECK(IsVariableAccess());
+ return binding_;
+ }
private:
base::Optional<VisitResult> variable_;
@@ -155,13 +167,14 @@ class LocationReference {
base::Optional<std::string> assign_function_;
VisitResultVector call_arguments_;
base::Optional<std::string> index_field_;
+ base::Optional<Binding<LocalValue>*> binding_;
LocationReference() = default;
};
struct InitializerResults {
std::vector<Identifier*> names;
- NameValueMap field_value_map;
+ std::map<std::string, VisitResult> field_value_map;
};
template <class T>
@@ -171,7 +184,15 @@ template <class T>
class BindingsManager {
public:
base::Optional<Binding<T>*> TryLookup(const std::string& name) {
- return current_bindings_[name];
+ if (name.length() >= 2 && name[0] == '_' && name[1] != '_') {
+ Error("Trying to reference '", name, "' which is marked as unused.")
+ .Throw();
+ }
+ auto binding = current_bindings_[name];
+ if (binding) {
+ (*binding)->SetUsed();
+ }
+ return binding;
}
private:
@@ -188,7 +209,9 @@ class Binding : public T {
: T(std::forward<Args>(args)...),
manager_(manager),
name_(name),
- previous_binding_(this) {
+ previous_binding_(this),
+ used_(false),
+ written_(false) {
std::swap(previous_binding_, manager_->current_bindings_[name]);
}
template <class... Args>
@@ -196,16 +219,43 @@ class Binding : public T {
: Binding(manager, name->value, std::forward<Args>(args)...) {
declaration_position_ = name->pos;
}
- ~Binding() { manager_->current_bindings_[name_] = previous_binding_; }
+ ~Binding() {
+ if (!used_ && !SkipLintCheck()) {
+ Lint(BindingTypeString(), "'", name_,
+ "' is never used. Prefix with '_' if this is intentional.")
+ .Position(declaration_position_);
+ }
+
+ if (CheckWritten() && !written_ && !SkipLintCheck()) {
+ Lint(BindingTypeString(), "'", name_,
+ "' is never assigned to. Use 'const' instead of 'let'.")
+ .Position(declaration_position_);
+ }
+
+ manager_->current_bindings_[name_] = previous_binding_;
+ }
+
+ std::string BindingTypeString() const;
+ bool CheckWritten() const;
const std::string& name() const { return name_; }
SourcePosition declaration_position() const { return declaration_position_; }
+ bool Used() const { return used_; }
+ void SetUsed() { used_ = true; }
+
+ bool Written() const { return written_; }
+ void SetWritten() { written_ = true; }
+
private:
+ bool SkipLintCheck() const { return name_.length() > 0 && name_[0] == '_'; }
+
BindingsManager<T>* manager_;
const std::string name_;
base::Optional<Binding*> previous_binding_;
SourcePosition declaration_position_ = CurrentSourcePosition::Get();
+ bool used_;
+ bool written_;
DISALLOW_COPY_AND_ASSIGN(Binding);
};
@@ -213,16 +263,20 @@ template <class T>
class BlockBindings {
public:
explicit BlockBindings(BindingsManager<T>* manager) : manager_(manager) {}
- void Add(std::string name, T value) {
+ void Add(std::string name, T value, bool mark_as_used = false) {
ReportErrorIfAlreadyBound(name);
- bindings_.push_back(base::make_unique<Binding<T>>(manager_, std::move(name),
- std::move(value)));
+ auto binding =
+ base::make_unique<Binding<T>>(manager_, name, std::move(value));
+ if (mark_as_used) binding->SetUsed();
+ bindings_.push_back(std::move(binding));
}
- void Add(const Identifier* name, T value) {
+ void Add(const Identifier* name, T value, bool mark_as_used = false) {
ReportErrorIfAlreadyBound(name->value);
- bindings_.push_back(
- base::make_unique<Binding<T>>(manager_, name, std::move(value)));
+ auto binding =
+ base::make_unique<Binding<T>>(manager_, name, std::move(value));
+ if (mark_as_used) binding->SetUsed();
+ bindings_.push_back(std::move(binding));
}
std::vector<Binding<T>*> bindings() const {
@@ -264,6 +318,25 @@ struct LocalLabel {
: block(block), parameter_types(std::move(parameter_types)) {}
};
+template <>
+inline std::string Binding<LocalValue>::BindingTypeString() const {
+ return "Variable ";
+}
+template <>
+inline bool Binding<LocalValue>::CheckWritten() const {
+ // Do the check only for non-const variables and non-struct types.
+ auto binding = *manager_->current_bindings_[name_];
+ return !binding->is_const && !binding->value.type()->IsStructType();
+}
+template <>
+inline std::string Binding<LocalLabel>::BindingTypeString() const {
+ return "Label ";
+}
+template <>
+inline bool Binding<LocalLabel>::CheckWritten() const {
+ return false;
+}
+
struct Arguments {
VisitResultVector parameters;
std::vector<Binding<LocalLabel>*> labels;
@@ -279,9 +352,11 @@ class ImplementationVisitor {
void GenerateClassFieldOffsets(const std::string& output_directory);
void GeneratePrintDefinitions(const std::string& output_directory);
void GenerateClassDefinitions(const std::string& output_directory);
+ void GenerateInstanceTypes(const std::string& output_directory);
void GenerateClassVerifiers(const std::string& output_directory);
void GenerateExportedMacrosAssembler(const std::string& output_directory);
void GenerateCSATypes(const std::string& output_directory);
+ void GenerateCppForInternalClasses(const std::string& output_directory);
VisitResult Visit(Expression* expr);
const Type* Visit(Statement* stmt);
@@ -365,22 +440,23 @@ class ImplementationVisitor {
const Type* Visit(VarDeclarationStatement* stmt);
const Type* Visit(VarDeclarationStatement* stmt,
BlockBindings<LocalValue>* block_bindings);
- const Type* Visit(ForOfLoopStatement* stmt);
const Type* Visit(BlockStatement* block);
const Type* Visit(ExpressionStatement* stmt);
const Type* Visit(DebugStatement* stmt);
const Type* Visit(AssertStatement* stmt);
- void BeginNamespaceFile(Namespace* nspace);
- void EndNamespaceFile(Namespace* nspace);
+ void BeginCSAFiles();
+ void EndCSAFiles();
- void GenerateImplementation(const std::string& dir, Namespace* nspace);
+ void GenerateImplementation(const std::string& dir);
DECLARE_CONTEXTUAL_VARIABLE(ValueBindingsManager,
BindingsManager<LocalValue>);
DECLARE_CONTEXTUAL_VARIABLE(LabelBindingsManager,
BindingsManager<LocalLabel>);
DECLARE_CONTEXTUAL_VARIABLE(CurrentCallable, Callable*);
+ DECLARE_CONTEXTUAL_VARIABLE(CurrentFileStreams,
+ GlobalContext::PerFileStreams*);
DECLARE_CONTEXTUAL_VARIABLE(CurrentReturnValue, base::Optional<VisitResult>);
// A BindingsManagersScope has to be active for local bindings to be created.
@@ -463,9 +539,9 @@ class ImplementationVisitor {
class BreakContinueActivator {
public:
BreakContinueActivator(Block* break_block, Block* continue_block)
- : break_binding_{&LabelBindingsManager::Get(), "_break",
+ : break_binding_{&LabelBindingsManager::Get(), kBreakLabelName,
LocalLabel{break_block}},
- continue_binding_{&LabelBindingsManager::Get(), "_continue",
+ continue_binding_{&LabelBindingsManager::Get(), kContinueLabelName,
LocalLabel{continue_block}} {}
private:
@@ -567,20 +643,16 @@ class ImplementationVisitor {
std::string ExternalParameterName(const std::string& name);
std::ostream& source_out() {
- Callable* callable = CurrentCallable::Get();
- if (!callable || callable->ShouldGenerateExternalCode()) {
- return CurrentNamespace()->source_stream();
- } else {
- return null_stream_;
+ if (auto* streams = CurrentFileStreams::Get()) {
+ return streams->csa_ccfile;
}
+ return null_stream_;
}
std::ostream& header_out() {
- Callable* callable = CurrentCallable::Get();
- if (!callable || callable->ShouldGenerateExternalCode()) {
- return CurrentNamespace()->header_stream();
- } else {
- return null_stream_;
+ if (auto* streams = CurrentFileStreams::Get()) {
+ return streams->csa_headerfile;
}
+ return null_stream_;
}
CfgAssembler& assembler() { return *assembler_; }
@@ -607,6 +679,8 @@ class ImplementationVisitor {
bool is_dry_run_;
};
+void ReportAllUnusedMacros();
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/ls/json-parser.cc b/deps/v8/src/torque/ls/json-parser.cc
index 708b75fd4c..c13449bfcc 100644
--- a/deps/v8/src/torque/ls/json-parser.cc
+++ b/deps/v8/src/torque/ls/json-parser.cc
@@ -183,7 +183,7 @@ class JsonGrammar : public Grammar {
JsonParserResult ParseJson(const std::string& input) {
// Torque needs a CurrentSourceFile scope during parsing.
// As JSON lives in memory only, an unknown file scope is created.
- SourceFileMap::Scope source_map_scope;
+ SourceFileMap::Scope source_map_scope("");
TorqueMessages::Scope messages_scope;
CurrentSourceFile::Scope unkown_file(SourceFileMap::AddSource("<json>"));
diff --git a/deps/v8/src/torque/ls/message-handler.cc b/deps/v8/src/torque/ls/message-handler.cc
index d55c11af56..6ec124b5a2 100644
--- a/deps/v8/src/torque/ls/message-handler.cc
+++ b/deps/v8/src/torque/ls/message-handler.cc
@@ -53,7 +53,7 @@ JsonValue ReadMessage() {
return ParseJson(content).value;
}
-void WriteMessage(JsonValue& message) {
+void WriteMessage(JsonValue message) {
std::string content = SerializeToString(message);
Logger::Log("[outgoing] ", content, "\n\n");
@@ -69,12 +69,12 @@ void ResetCompilationErrorDiagnostics(MessageWriter writer) {
PublishDiagnosticsNotification notification;
notification.set_method("textDocument/publishDiagnostics");
- std::string error_file = SourceFileMap::GetSource(source);
+ std::string error_file = SourceFileMap::AbsolutePath(source);
notification.params().set_uri(error_file);
// Trigger empty array creation.
USE(notification.params().diagnostics_size());
- writer(notification.GetJsonValue());
+ writer(std::move(notification.GetJsonValue()));
}
DiagnosticsFiles::Get() = {};
}
@@ -115,7 +115,7 @@ class DiagnosticCollector {
notification.set_method("textDocument/publishDiagnostics");
std::string file =
- id.IsValid() ? SourceFileMap::GetSource(id) : "<unknown>";
+ id.IsValid() ? SourceFileMap::AbsolutePath(id) : "<unknown>";
notification.params().set_uri(file);
return notification;
}
@@ -151,7 +151,7 @@ void SendCompilationDiagnostics(const TorqueCompilerResult& result,
for (auto& pair : collector.notifications()) {
PublishDiagnosticsNotification& notification = pair.second;
- writer(notification.GetJsonValue());
+ writer(std::move(notification.GetJsonValue()));
// Record all source files for which notifications are sent, so they
// can be reset before the next compiler run.
@@ -164,7 +164,7 @@ void SendCompilationDiagnostics(const TorqueCompilerResult& result,
void CompilationFinished(TorqueCompilerResult result, MessageWriter writer) {
LanguageServerData::Get() = std::move(result.language_server_data);
- SourceFileMap::Get() = result.source_file_map;
+ SourceFileMap::Get() = *result.source_file_map;
SendCompilationDiagnostics(result, writer);
}
@@ -205,7 +205,7 @@ void HandleInitializeRequest(InitializeRequest request, MessageWriter writer) {
// "workspace/didChangeWatchedFiles" capability.
// TODO(szuend): Check if client supports "LocationLink". This will
// influence the result of "goto definition".
- writer(response.GetJsonValue());
+ writer(std::move(response.GetJsonValue()));
}
void HandleInitializedNotification(MessageWriter writer) {
@@ -224,7 +224,7 @@ void HandleInitializedNotification(MessageWriter writer) {
reg.set_id("did-change-id");
reg.set_method("workspace/didChangeWatchedFiles");
- writer(request.GetJsonValue());
+ writer(std::move(request.GetJsonValue()));
}
void HandleTorqueFileListNotification(TorqueFileListNotification notification,
@@ -258,7 +258,7 @@ void HandleGotoDefinitionRequest(GotoDefinitionRequest request,
// the definition not being found.
if (!id.IsValid()) {
response.SetNull("result");
- writer(response.GetJsonValue());
+ writer(std::move(response.GetJsonValue()));
return;
}
@@ -272,7 +272,7 @@ void HandleGotoDefinitionRequest(GotoDefinitionRequest request,
response.SetNull("result");
}
- writer(response.GetJsonValue());
+ writer(std::move(response.GetJsonValue()));
}
void HandleChangeWatchedFilesNotification(
@@ -325,13 +325,13 @@ void HandleDocumentSymbolRequest(DocumentSymbolRequest request,
// Trigger empty array creation in case no symbols were found.
USE(response.result_size());
- writer(response.GetJsonValue());
+ writer(std::move(response.GetJsonValue()));
}
} // namespace
-void HandleMessage(JsonValue& raw_message, MessageWriter writer) {
- Request<bool> request(raw_message);
+void HandleMessage(JsonValue raw_message, MessageWriter writer) {
+ Request<bool> request(std::move(raw_message));
// We ignore responses for now. They are matched to requests
// by id and don't have a method set.
@@ -344,21 +344,23 @@ void HandleMessage(JsonValue& raw_message, MessageWriter writer) {
const std::string method = request.method();
if (method == "initialize") {
- HandleInitializeRequest(InitializeRequest(request.GetJsonValue()), writer);
+ HandleInitializeRequest(
+ InitializeRequest(std::move(request.GetJsonValue())), writer);
} else if (method == "initialized") {
HandleInitializedNotification(writer);
} else if (method == "torque/fileList") {
HandleTorqueFileListNotification(
- TorqueFileListNotification(request.GetJsonValue()), writer);
+ TorqueFileListNotification(std::move(request.GetJsonValue())), writer);
} else if (method == "textDocument/definition") {
- HandleGotoDefinitionRequest(GotoDefinitionRequest(request.GetJsonValue()),
- writer);
+ HandleGotoDefinitionRequest(
+ GotoDefinitionRequest(std::move(request.GetJsonValue())), writer);
} else if (method == "workspace/didChangeWatchedFiles") {
HandleChangeWatchedFilesNotification(
- DidChangeWatchedFilesNotification(request.GetJsonValue()), writer);
+ DidChangeWatchedFilesNotification(std::move(request.GetJsonValue())),
+ writer);
} else if (method == "textDocument/documentSymbol") {
- HandleDocumentSymbolRequest(DocumentSymbolRequest(request.GetJsonValue()),
- writer);
+ HandleDocumentSymbolRequest(
+ DocumentSymbolRequest(std::move(request.GetJsonValue())), writer);
} else {
Logger::Log("[error] Message of type ", method, " is not handled!\n\n");
}
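
The signature changes above turn MessageWriter from a plain function pointer taking JsonValue& into a std::function that takes its argument by value, with every call site moving the message into the writer. A small sketch, not V8 code, of why that matters for the unit tests mentioned in the header comment: a capturing lambda converts to std::function but not to a function pointer, and pass-by-value lets ownership be handed off with std::move.

    #include <functional>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    using JsonValue = std::string;  // stand-in for the real JSON type
    using MessageWriter = std::function<void(JsonValue)>;

    void HandleMessage(JsonValue raw_message, MessageWriter writer) {
      JsonValue response = "response to: " + raw_message;
      writer(std::move(response));  // ownership moves into the writer
    }

    int main() {
      std::vector<JsonValue> captured;
      // A capturing lambda would not convert to the old function-pointer type.
      MessageWriter test_writer = [&](JsonValue message) {
        captured.push_back(std::move(message));
      };
      HandleMessage("initialize", test_writer);
      for (const auto& m : captured) std::cout << m << "\n";
    }

Passing &WriteMessage still works unchanged, since a free function converts to std::function as well.
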
diff --git a/deps/v8/src/torque/ls/message-handler.h b/deps/v8/src/torque/ls/message-handler.h
index 3be5cf03e4..7f8a82b869 100644
--- a/deps/v8/src/torque/ls/message-handler.h
+++ b/deps/v8/src/torque/ls/message-handler.h
@@ -24,9 +24,9 @@ namespace ls {
// The message handler might send responses or follow-up requests.
// To allow unit testing, the "sending" function is configurable.
-using MessageWriter = void (*)(JsonValue& message);
+using MessageWriter = std::function<void(JsonValue)>;
-V8_EXPORT_PRIVATE void HandleMessage(JsonValue& raw_message, MessageWriter);
+V8_EXPORT_PRIVATE void HandleMessage(JsonValue raw_message, MessageWriter);
// Called when a compilation run finishes. Exposed for testability.
V8_EXPORT_PRIVATE void CompilationFinished(TorqueCompilerResult result,
diff --git a/deps/v8/src/torque/ls/message-pipe.h b/deps/v8/src/torque/ls/message-pipe.h
index 981fed4b2f..0fbdfe4f1c 100644
--- a/deps/v8/src/torque/ls/message-pipe.h
+++ b/deps/v8/src/torque/ls/message-pipe.h
@@ -14,7 +14,7 @@ namespace torque {
namespace ls {
JsonValue ReadMessage();
-void WriteMessage(JsonValue& message);
+void WriteMessage(JsonValue message);
} // namespace ls
} // namespace torque
diff --git a/deps/v8/src/torque/ls/message.h b/deps/v8/src/torque/ls/message.h
index 4389e9265d..0d84d2ffaf 100644
--- a/deps/v8/src/torque/ls/message.h
+++ b/deps/v8/src/torque/ls/message.h
@@ -73,7 +73,7 @@ class Message : public BaseJsonAccessor {
value_ = JsonValue::From(JsonObject{});
set_jsonrpc("2.0");
}
- explicit Message(JsonValue& value) : value_(std::move(value)) {
+ explicit Message(JsonValue value) : value_(std::move(value)) {
CHECK(value_.tag == JsonValue::OBJECT);
}
@@ -241,7 +241,7 @@ class Location : public NestedJsonAccessor {
JSON_OBJECT_ACCESSORS(Range, range)
void SetTo(SourcePosition position) {
- set_uri(SourceFileMap::GetSource(position.source));
+ set_uri(SourceFileMap::AbsolutePath(position.source));
range().start().set_line(position.start.line);
range().start().set_character(position.start.column);
range().end().set_line(position.end.line);
@@ -323,7 +323,7 @@ class SymbolInformation : public NestedJsonAccessor {
template <class T>
class Request : public Message {
public:
- explicit Request(JsonValue& value) : Message(value) {}
+ explicit Request(JsonValue value) : Message(std::move(value)) {}
Request() : Message() {}
JSON_INT_ACCESSORS(id)
@@ -341,7 +341,7 @@ using DocumentSymbolRequest = Request<DocumentSymbolParams>;
template <class T>
class Response : public Message {
public:
- explicit Response(JsonValue& value) : Message(value) {}
+ explicit Response(JsonValue value) : Message(std::move(value)) {}
Response() : Message() {}
JSON_INT_ACCESSORS(id)
@@ -355,7 +355,7 @@ using GotoDefinitionResponse = Response<Location>;
template <class T>
class ResponseArrayResult : public Message {
public:
- explicit ResponseArrayResult(JsonValue& value) : Message(value) {}
+ explicit ResponseArrayResult(JsonValue value) : Message(std::move(value)) {}
ResponseArrayResult() : Message() {}
JSON_INT_ACCESSORS(id)
diff --git a/deps/v8/src/torque/ls/torque-language-server.cc b/deps/v8/src/torque/ls/torque-language-server.cc
index 4cf0b4c9fb..21e2c3957c 100644
--- a/deps/v8/src/torque/ls/torque-language-server.cc
+++ b/deps/v8/src/torque/ls/torque-language-server.cc
@@ -21,7 +21,7 @@ int WrappedMain(int argc, const char** argv) {
Logger::Scope log_scope;
TorqueFileList::Scope files_scope;
LanguageServerData::Scope server_data_scope;
- SourceFileMap::Scope source_file_map_scope;
+ SourceFileMap::Scope source_file_map_scope("");
DiagnosticsFiles::Scope diagnostics_files_scope;
for (int i = 1; i < argc; ++i) {
@@ -32,13 +32,13 @@ int WrappedMain(int argc, const char** argv) {
}
while (true) {
- auto message = ReadMessage();
+ JsonValue message = ReadMessage();
// TODO(szuend): We should probably offload the actual message handling
// (even the parsing) to a background thread, so we can
// keep receiving messages. We might also receive
// $/cancelRequests or content updates that require restarts.
- HandleMessage(message, &WriteMessage);
+ HandleMessage(std::move(message), &WriteMessage);
}
return 0;
}
diff --git a/deps/v8/src/torque/server-data.h b/deps/v8/src/torque/server-data.h
index ebaafb2fd0..04cd0b317f 100644
--- a/deps/v8/src/torque/server-data.h
+++ b/deps/v8/src/torque/server-data.h
@@ -13,6 +13,7 @@
#include "src/torque/declarable.h"
#include "src/torque/global-context.h"
#include "src/torque/source-positions.h"
+#include "src/torque/type-oracle.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/torque/source-positions.cc b/deps/v8/src/torque/source-positions.cc
index b10c98f125..69be0e0911 100644
--- a/deps/v8/src/torque/source-positions.cc
+++ b/deps/v8/src/torque/source-positions.cc
@@ -4,6 +4,9 @@
#include "src/torque/source-positions.h"
+#include <fstream>
+#include "src/torque/utils.h"
+
namespace v8 {
namespace internal {
namespace torque {
@@ -12,6 +15,62 @@ DEFINE_CONTEXTUAL_VARIABLE(CurrentSourceFile)
DEFINE_CONTEXTUAL_VARIABLE(CurrentSourcePosition)
DEFINE_CONTEXTUAL_VARIABLE(SourceFileMap)
+// static
+const std::string& SourceFileMap::PathFromV8Root(SourceId file) {
+ CHECK(file.IsValid());
+ return Get().sources_[file.id_];
+}
+
+// static
+std::string SourceFileMap::AbsolutePath(SourceId file) {
+ const std::string& root_path = PathFromV8Root(file);
+ if (StringStartsWith(root_path, "file://")) return root_path;
+ return Get().v8_root_ + "/" + PathFromV8Root(file);
+}
+
+// static
+std::string SourceFileMap::PathFromV8RootWithoutExtension(SourceId file) {
+ std::string path_from_root = PathFromV8Root(file);
+ if (!StringEndsWith(path_from_root, ".tq")) {
+ Error("Not a .tq file: ", path_from_root).Throw();
+ }
+ path_from_root.resize(path_from_root.size() - strlen(".tq"));
+ return path_from_root;
+}
+
+// static
+SourceId SourceFileMap::AddSource(std::string path) {
+ Get().sources_.push_back(std::move(path));
+ return SourceId(static_cast<int>(Get().sources_.size()) - 1);
+}
+
+// static
+SourceId SourceFileMap::GetSourceId(const std::string& path) {
+ for (size_t i = 0; i < Get().sources_.size(); ++i) {
+ if (Get().sources_[i] == path) {
+ return SourceId(static_cast<int>(i));
+ }
+ }
+ return SourceId::Invalid();
+}
+
+// static
+std::vector<SourceId> SourceFileMap::AllSources() {
+ SourceFileMap& self = Get();
+ std::vector<SourceId> result;
+ for (int i = 0; i < static_cast<int>(self.sources_.size()); ++i) {
+ result.push_back(SourceId(i));
+ }
+ return result;
+}
+
+// static
+bool SourceFileMap::FileRelativeToV8RootExists(const std::string& path) {
+ const std::string file = Get().v8_root_ + "/" + path;
+ std::ifstream stream(file);
+ return stream.good();
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/source-positions.h b/deps/v8/src/torque/source-positions.h
index c609d4600b..69ce78cf7c 100644
--- a/deps/v8/src/torque/source-positions.h
+++ b/deps/v8/src/torque/source-positions.h
@@ -71,34 +71,24 @@ struct SourcePosition {
DECLARE_CONTEXTUAL_VARIABLE(CurrentSourceFile, SourceId);
DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition);
-class SourceFileMap : public ContextualClass<SourceFileMap> {
+class V8_EXPORT_PRIVATE SourceFileMap : public ContextualClass<SourceFileMap> {
public:
- SourceFileMap() = default;
- static const std::string& GetSource(SourceId source) {
- CHECK(source.IsValid());
- return Get().sources_[source.id_];
- }
-
- static SourceId AddSource(std::string path) {
- Get().sources_.push_back(std::move(path));
- return SourceId(static_cast<int>(Get().sources_.size()) - 1);
- }
-
- static SourceId GetSourceId(const std::string& path) {
- for (size_t i = 0; i < Get().sources_.size(); ++i) {
- if (Get().sources_[i] == path) {
- return SourceId(static_cast<int>(i));
- }
- }
- return SourceId::Invalid();
- }
+ explicit SourceFileMap(std::string v8_root) : v8_root_(std::move(v8_root)) {}
+ static const std::string& PathFromV8Root(SourceId file);
+ static std::string PathFromV8RootWithoutExtension(SourceId file);
+ static std::string AbsolutePath(SourceId file);
+ static SourceId AddSource(std::string path);
+ static SourceId GetSourceId(const std::string& path);
+ static std::vector<SourceId> AllSources();
+ static bool FileRelativeToV8RootExists(const std::string& path);
private:
std::vector<std::string> sources_;
+ std::string v8_root_;
};
inline std::string PositionAsString(SourcePosition pos) {
- return SourceFileMap::GetSource(pos.source) + ":" +
+ return SourceFileMap::PathFromV8Root(pos.source) + ":" +
std::to_string(pos.start.line + 1) + ":" +
std::to_string(pos.start.column + 1);
}
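
SourceFileMap now stores paths relative to the V8 root passed into its scope and only joins them with v8_root_ on demand, leaving file:// URIs untouched. A standalone sketch of that path handling under those assumptions; the real class is the contextual one declared above, and the concrete paths here are purely illustrative.

    #include <cassert>
    #include <string>
    #include <vector>

    static bool StringStartsWith(const std::string& s, const std::string& prefix) {
      return s.size() >= prefix.size() && s.compare(0, prefix.size(), prefix) == 0;
    }

    struct SourceFileMapSketch {
      std::string v8_root;
      std::vector<std::string> sources;  // indexed by SourceId

      int AddSource(std::string path) {
        sources.push_back(std::move(path));
        return static_cast<int>(sources.size()) - 1;
      }

      std::string AbsolutePath(int id) const {
        const std::string& root_relative = sources[id];
        // file:// URIs (e.g. in-memory language-server documents) are already
        // absolute and are returned unchanged.
        if (StringStartsWith(root_relative, "file://")) return root_relative;
        return v8_root + "/" + root_relative;
      }
    };

    int main() {
      SourceFileMapSketch map{"/path/to/v8", {}};
      int id = map.AddSource("src/builtins/array.tq");
      assert(map.AbsolutePath(id) == "/path/to/v8/src/builtins/array.tq");
      int uri = map.AddSource("file:///tmp/in-memory.tq");
      assert(map.AbsolutePath(uri) == "file:///tmp/in-memory.tq");
    }
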
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index d761b3ab53..a3da95c747 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -31,7 +31,7 @@ void ReadAndParseTorqueFile(const std::string& path) {
CurrentSourceFile::Scope source_id_scope(source_id);
// path might be either a normal file path or an encoded URI.
- auto maybe_content = ReadFile(path);
+ auto maybe_content = ReadFile(SourceFileMap::AbsolutePath(source_id));
if (!maybe_content) {
if (auto maybe_path = FileUriDecode(path)) {
maybe_content = ReadFile(*maybe_path);
@@ -57,27 +57,27 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
// Two-step process of predeclaration + resolution allows resolving type
// declarations independently of the order they are given.
- PredeclarationVisitor::Predeclare(GlobalContext::Get().ast());
+ PredeclarationVisitor::Predeclare(GlobalContext::ast());
PredeclarationVisitor::ResolvePredeclarations();
// Process other declarations.
- DeclarationVisitor::Visit(GlobalContext::Get().ast());
+ DeclarationVisitor::Visit(GlobalContext::ast());
// A class type's fields are resolved here, which allows two class fields to
// mutually refer to each other.
- TypeOracle::FinalizeClassTypes();
+ TypeOracle::FinalizeAggregateTypes();
std::string output_directory = options.output_directory;
ImplementationVisitor implementation_visitor;
implementation_visitor.SetDryRun(output_directory.length() == 0);
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- implementation_visitor.BeginNamespaceFile(n);
- }
+ implementation_visitor.BeginCSAFiles();
implementation_visitor.VisitAllDeclarables();
+ ReportAllUnusedMacros();
+
implementation_visitor.GenerateBuiltinDefinitions(output_directory);
implementation_visitor.GenerateClassFieldOffsets(output_directory);
implementation_visitor.GeneratePrintDefinitions(output_directory);
@@ -85,11 +85,11 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateClassVerifiers(output_directory);
implementation_visitor.GenerateExportedMacrosAssembler(output_directory);
implementation_visitor.GenerateCSATypes(output_directory);
+ implementation_visitor.GenerateInstanceTypes(output_directory);
+ implementation_visitor.GenerateCppForInternalClasses(output_directory);
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- implementation_visitor.EndNamespaceFile(n);
- implementation_visitor.GenerateImplementation(output_directory, n);
- }
+ implementation_visitor.EndCSAFiles();
+ implementation_visitor.GenerateImplementation(output_directory);
if (GlobalContext::collect_language_server_data()) {
LanguageServerData::SetGlobalContext(std::move(GlobalContext::Get()));
@@ -101,8 +101,9 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
TorqueCompilerResult CompileTorque(const std::string& source,
TorqueCompilerOptions options) {
- SourceFileMap::Scope source_map_scope;
- CurrentSourceFile::Scope no_file_scope(SourceFileMap::AddSource("<torque>"));
+ SourceFileMap::Scope source_map_scope(options.v8_root);
+ CurrentSourceFile::Scope no_file_scope(
+ SourceFileMap::AddSource("dummy-filename.tq"));
CurrentAst::Scope ast_scope;
TorqueMessages::Scope messages_scope;
LanguageServerData::Scope server_data_scope;
@@ -125,7 +126,7 @@ TorqueCompilerResult CompileTorque(const std::string& source,
TorqueCompilerResult CompileTorque(std::vector<std::string> files,
TorqueCompilerOptions options) {
- SourceFileMap::Scope source_map_scope;
+ SourceFileMap::Scope source_map_scope(options.v8_root);
CurrentSourceFile::Scope unknown_source_file_scope(SourceId::Invalid());
CurrentAst::Scope ast_scope;
TorqueMessages::Scope messages_scope;
@@ -133,7 +134,9 @@ TorqueCompilerResult CompileTorque(std::vector<std::string> files,
TorqueCompilerResult result;
try {
- for (const auto& path : files) ReadAndParseTorqueFile(path);
+ for (const auto& path : files) {
+ ReadAndParseTorqueFile(path);
+ }
CompileCurrentAst(options);
} catch (TorqueAbortCompilation&) {
// Do nothing. The relevant TorqueMessage is part of the
diff --git a/deps/v8/src/torque/torque-compiler.h b/deps/v8/src/torque/torque-compiler.h
index 8e412d1be0..32680986fd 100644
--- a/deps/v8/src/torque/torque-compiler.h
+++ b/deps/v8/src/torque/torque-compiler.h
@@ -17,6 +17,7 @@ namespace torque {
struct TorqueCompilerOptions {
std::string output_directory = "";
+ std::string v8_root = "";
bool collect_language_server_data = false;
// assert(...) are only generated for debug builds. They provide
@@ -29,7 +30,7 @@ struct TorqueCompilerResult {
// Map translating SourceIds to filenames. This field is
// set on errors, so the SourcePosition of the error can be
// resolved.
- SourceFileMap source_file_map;
+ base::Optional<SourceFileMap> source_file_map;
// Eagerly collected data needed for the LanguageServer.
// Set the corresponding options flag to enable.
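
A hedged sketch of how a driver might fill in the extended options before calling CompileTorque; the field names mirror the struct above (v8_root is wired to the new -v8-root flag in torque.cc later in this patch), while the surrounding main() and the example paths are purely illustrative.

    #include <string>
    #include <vector>

    struct TorqueCompilerOptions {  // mirrors the declaration above
      std::string output_directory = "";
      std::string v8_root = "";
      bool collect_language_server_data = false;
      bool force_assert_statements = false;
    };

    int main() {
      TorqueCompilerOptions options;
      options.output_directory = "gen/torque-generated";
      // Relative .tq paths and import declarations resolve against this root.
      options.v8_root = ".";
      options.collect_language_server_data = false;
      options.force_assert_statements = false;
      std::vector<std::string> files = {"src/builtins/base.tq"};
      // The real driver would now call CompileTorque(files, options).
      return 0;
    }
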
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 619096c6a5..0a371b79f9 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -9,6 +9,7 @@
#include "src/common/globals.h"
#include "src/torque/constants.h"
+#include "src/torque/declarations.h"
#include "src/torque/earley-parser.h"
#include "src/torque/torque-parser.h"
#include "src/torque/utils.h"
@@ -128,6 +129,14 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultTypeId::kStdVectorOfNameAndTypeExpression;
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<ImplicitParameters>::id =
+ ParseResultTypeId::kImplicitParameters;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<ImplicitParameters>>::id =
+ ParseResultTypeId::kOptionalImplicitParameters;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<std::vector<NameAndExpression>>::id =
ParseResultTypeId::kStdVectorOfNameAndExpression;
template <>
@@ -170,14 +179,6 @@ template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<ParameterList>::id =
ParseResultTypeId::kParameterList;
template <>
-V8_EXPORT_PRIVATE const ParseResultTypeId
- ParseResultHolder<RangeExpression>::id =
- ParseResultTypeId::kRangeExpression;
-template <>
-V8_EXPORT_PRIVATE const ParseResultTypeId
- ParseResultHolder<base::Optional<RangeExpression>>::id =
- ParseResultTypeId::kOptionalRangeExpression;
-template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<TypeList>::id =
ParseResultTypeId::kTypeList;
template <>
@@ -226,11 +227,23 @@ base::Optional<ParseResult> AddGlobalDeclarations(
return base::nullopt;
}
+void NamingConventionError(const std::string& type, const std::string& name,
+ const std::string& convention,
+ SourcePosition pos = CurrentSourcePosition::Get()) {
+ Lint(type, " \"", name, "\" does not follow \"", convention,
+ "\" naming convention.")
+ .Position(pos);
+}
+
+void NamingConventionError(const std::string& type, const Identifier* name,
+ const std::string& convention) {
+ NamingConventionError(type, name->value, convention, name->pos);
+}
+
void LintGenericParameters(const GenericParameters& parameters) {
- for (const Identifier* parameter : parameters) {
+ for (auto parameter : parameters) {
if (!IsUpperCamelCase(parameter->value)) {
- NamingConventionError("Generic parameter", parameter->value,
- "UpperCamelCase");
+ NamingConventionError("Generic parameter", parameter, "UpperCamelCase");
}
}
}
@@ -277,7 +290,7 @@ Expression* MakeCall(IdentifierExpression* callee,
continue;
}
}
- auto label_name = std::string("_label") + std::to_string(label_id++);
+ auto label_name = std::string("__label") + std::to_string(label_id++);
auto label_id = MakeNode<Identifier>(label_name);
label_id->pos = SourcePosition::Invalid();
labels.push_back(label_id);
@@ -371,60 +384,61 @@ base::Optional<ParseResult> MakeSpreadExpression(
return ParseResult{result};
}
-template <bool has_varargs>
-base::Optional<ParseResult> MakeParameterListFromTypes(
+base::Optional<ParseResult> MakeImplicitParameterList(
ParseResultIterator* child_results) {
- auto implicit_params =
- child_results->NextAs<std::vector<NameAndTypeExpression>>();
- auto explicit_types = child_results->NextAs<TypeList>();
- ParameterList result;
- result.has_varargs = has_varargs;
- result.implicit_count = implicit_params.size();
- for (NameAndTypeExpression& implicit_param : implicit_params) {
- if (!IsLowerCamelCase(implicit_param.name->value)) {
- NamingConventionError("Parameter", implicit_param.name->value,
- "lowerCamelCase");
- }
- result.names.push_back(implicit_param.name);
- result.types.push_back(implicit_param.type);
- }
- for (auto* explicit_type : explicit_types) {
- result.types.push_back(explicit_type);
+ auto kind = child_results->NextAs<Identifier*>();
+ auto parameters = child_results->NextAs<std::vector<NameAndTypeExpression>>();
+ return ParseResult{ImplicitParameters{kind, parameters}};
+}
+
+void AddParameter(ParameterList* parameter_list,
+ const NameAndTypeExpression& param) {
+ if (!IsLowerCamelCase(param.name->value)) {
+ NamingConventionError("Parameter", param.name, "lowerCamelCase");
}
- return ParseResult{std::move(result)};
+ parameter_list->names.push_back(param.name);
+ parameter_list->types.push_back(param.type);
}
-template <bool has_varargs>
-base::Optional<ParseResult> MakeParameterListFromNameAndTypeList(
+template <bool has_varargs, bool has_explicit_parameter_names>
+base::Optional<ParseResult> MakeParameterList(
ParseResultIterator* child_results) {
auto implicit_params =
- child_results->NextAs<std::vector<NameAndTypeExpression>>();
- auto explicit_params =
- child_results->NextAs<std::vector<NameAndTypeExpression>>();
- std::string arguments_variable = "";
- if (child_results->HasNext()) {
- arguments_variable = child_results->NextAs<std::string>();
- }
+ child_results->NextAs<base::Optional<ImplicitParameters>>();
ParameterList result;
- for (NameAndTypeExpression& pair : implicit_params) {
- if (!IsLowerCamelCase(pair.name->value)) {
- NamingConventionError("Parameter", pair.name->value, "lowerCamelCase");
+ result.has_varargs = has_varargs;
+ result.implicit_count = 0;
+ result.implicit_kind = ImplicitKind::kNoImplicit;
+ if (implicit_params) {
+ result.implicit_count = implicit_params->parameters.size();
+ if (implicit_params->kind->value == "implicit") {
+ result.implicit_kind = ImplicitKind::kImplicit;
+ } else {
+ DCHECK_EQ(implicit_params->kind->value, "js-implicit");
+ result.implicit_kind = ImplicitKind::kJSImplicit;
+ }
+ result.implicit_kind_pos = implicit_params->kind->pos;
+ for (NameAndTypeExpression& implicit_param : implicit_params->parameters) {
+ AddParameter(&result, implicit_param);
}
-
- result.names.push_back(std::move(pair.name));
- result.types.push_back(pair.type);
}
- for (NameAndTypeExpression& pair : explicit_params) {
- if (!IsLowerCamelCase(pair.name->value)) {
- NamingConventionError("Parameter", pair.name->value, "lowerCamelCase");
+ if (has_explicit_parameter_names) {
+ auto explicit_params =
+ child_results->NextAs<std::vector<NameAndTypeExpression>>();
+ std::string arguments_variable = "";
+ if (has_varargs) {
+ arguments_variable = child_results->NextAs<std::string>();
+ }
+ for (NameAndTypeExpression& param : explicit_params) {
+ AddParameter(&result, param);
+ }
+ result.arguments_variable = arguments_variable;
+ } else {
+ auto explicit_types = child_results->NextAs<TypeList>();
+ for (auto* explicit_type : explicit_types) {
+ result.types.push_back(explicit_type);
}
-
- result.names.push_back(pair.name);
- result.types.push_back(pair.type);
}
- result.implicit_count = implicit_params.size();
- result.has_varargs = has_varargs;
- result.arguments_variable = arguments_variable;
return ParseResult{std::move(result)};
}
@@ -447,8 +461,8 @@ base::Optional<ParseResult> MakeDebugStatement(
}
base::Optional<ParseResult> MakeVoidType(ParseResultIterator* child_results) {
- TypeExpression* result =
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, "void");
+ TypeExpression* result = MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, "void", std::vector<TypeExpression*>{});
return ParseResult{result};
}
@@ -567,7 +581,7 @@ base::Optional<ParseResult> MakeConstDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<Identifier*>();
if (!IsValidNamespaceConstName(name->value)) {
- NamingConventionError("Constant", name->value, "kUpperCamelCase");
+ NamingConventionError("Constant", name, "kUpperCamelCase");
}
auto type = child_results->NextAs<TypeExpression*>();
@@ -599,7 +613,7 @@ base::Optional<ParseResult> MakeAbstractTypeDeclaration(
auto transient = child_results->NextAs<bool>();
auto name = child_results->NextAs<Identifier*>();
if (!IsValidTypeName(name->value)) {
- NamingConventionError("Type", name->value, "UpperCamelCase");
+ NamingConventionError("Type", name, "UpperCamelCase");
}
auto extends = child_results->NextAs<base::Optional<Identifier*>>();
auto generates = child_results->NextAs<base::Optional<std::string>>();
@@ -700,7 +714,7 @@ base::Optional<ParseResult> MakeClassDeclaration(
if (transient) flags |= ClassFlag::kTransient;
auto name = child_results->NextAs<Identifier*>();
if (!IsValidTypeName(name->value)) {
- NamingConventionError("Type", name->value, "UpperCamelCase");
+ NamingConventionError("Type", name, "UpperCamelCase");
}
auto extends = child_results->NextAs<base::Optional<TypeExpression*>>();
if (extends && !BasicTypeExpression::DynamicCast(*extends)) {
@@ -760,12 +774,15 @@ base::Optional<ParseResult> MakeStructDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<Identifier*>();
if (!IsValidTypeName(name->value)) {
- NamingConventionError("Struct", name->value, "UpperCamelCase");
+ NamingConventionError("Struct", name, "UpperCamelCase");
}
+ auto generic_parameters = child_results->NextAs<GenericParameters>();
+ LintGenericParameters(generic_parameters);
auto methods = child_results->NextAs<std::vector<Declaration*>>();
auto fields = child_results->NextAs<std::vector<StructFieldExpression>>();
Declaration* result =
- MakeNode<StructDeclaration>(name, std::move(methods), std::move(fields));
+ MakeNode<StructDeclaration>(name, std::move(methods), std::move(fields),
+ std::move(generic_parameters));
return ParseResult{result};
}
@@ -777,6 +794,25 @@ base::Optional<ParseResult> MakeCppIncludeDeclaration(
return ParseResult{result};
}
+base::Optional<ParseResult> ProcessTorqueImportDeclaration(
+ ParseResultIterator* child_results) {
+ auto import_path = child_results->NextAs<std::string>();
+ if (!SourceFileMap::FileRelativeToV8RootExists(import_path)) {
+ Error("File '", import_path, "' not found.");
+ }
+
+ auto import_id = SourceFileMap::GetSourceId(import_path);
+ if (!import_id.IsValid()) {
+ // TODO(szuend): Instead of reporting an error, queue the file up
+ // for compilation.
+ Error("File '", import_path, "' is not part of the source set.").Throw();
+ }
+
+ CurrentAst::Get().DeclareImportForCurrentFile(import_id);
+
+ return base::nullopt;
+}
+
base::Optional<ParseResult> MakeExternalBuiltin(
ParseResultIterator* child_results) {
auto transitioning = child_results->NextAs<bool>();
@@ -822,9 +858,12 @@ base::Optional<ParseResult> MakeBasicTypeExpression(
child_results->NextAs<std::vector<std::string>>();
auto is_constexpr = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
+ auto generic_arguments =
+ child_results->NextAs<std::vector<TypeExpression*>>();
TypeExpression* result = MakeNode<BasicTypeExpression>(
std::move(namespace_qualification),
- is_constexpr ? GetConstexprName(name) : std::move(name));
+ is_constexpr ? GetConstexprName(name) : std::move(name),
+ std::move(generic_arguments));
return ParseResult{result};
}
@@ -920,14 +959,14 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
{
CurrentSourcePosition::Scope current_source_position(expression->pos);
current_block->statements.push_back(MakeNode<VarDeclarationStatement>(
- true, MakeNode<Identifier>("_value"), base::nullopt, expression));
+ true, MakeNode<Identifier>("__value"), base::nullopt, expression));
}
TypeExpression* accumulated_types;
for (size_t i = 0; i < cases.size(); ++i) {
CurrentSourcePosition::Scope current_source_position(cases[i].pos);
Expression* value =
- MakeNode<IdentifierExpression>(MakeNode<Identifier>("_value"));
+ MakeNode<IdentifierExpression>(MakeNode<Identifier>("__value"));
if (i >= 1) {
value =
MakeNode<AssumeTypeImpossibleExpression>(accumulated_types, value);
@@ -939,12 +978,12 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
std::vector<Expression*>{value},
std::vector<Statement*>{MakeNode<ExpressionStatement>(
MakeNode<IdentifierExpression>(
- MakeNode<Identifier>("_NextCase")))});
+ MakeNode<Identifier>(kNextCaseLabelName)))});
case_block = MakeNode<BlockStatement>();
} else {
case_block = current_block;
}
- std::string name = "_case_value";
+ std::string name = "__case_value";
if (cases[i].name) name = *cases[i].name;
case_block->statements.push_back(MakeNode<VarDeclarationStatement>(
true, MakeNode<Identifier>(name), cases[i].type, value));
@@ -954,7 +993,7 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
current_block->statements.push_back(
MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
false, MakeNode<StatementExpression>(case_block),
- MakeNode<LabelBlock>(MakeNode<Identifier>("_NextCase"),
+ MakeNode<LabelBlock>(MakeNode<Identifier>(kNextCaseLabelName),
ParameterList::Empty(), next_block))));
current_block = next_block;
}
@@ -1004,7 +1043,7 @@ base::Optional<ParseResult> MakeVarDeclarationStatement(
if (!const_qualified) DCHECK_EQ("let", kind->value);
auto name = child_results->NextAs<Identifier*>();
if (!IsLowerCamelCase(name->value)) {
- NamingConventionError("Variable", name->value, "lowerCamelCase");
+ NamingConventionError("Variable", name, "lowerCamelCase");
}
auto type = child_results->NextAs<base::Optional<TypeExpression*>>();
@@ -1068,19 +1107,6 @@ base::Optional<ParseResult> MakeTryLabelExpression(
return ParseResult{result};
}
-base::Optional<ParseResult> MakeForOfLoopStatement(
- ParseResultIterator* child_results) {
- auto var_decl = child_results->NextAs<Statement*>();
- CheckNotDeferredStatement(var_decl);
- auto iterable = child_results->NextAs<Expression*>();
- auto range = child_results->NextAs<base::Optional<RangeExpression>>();
- auto body = child_results->NextAs<Statement*>();
- CheckNotDeferredStatement(body);
- Statement* result =
- MakeNode<ForOfLoopStatement>(var_decl, iterable, range, body);
- return ParseResult{result};
-}
-
base::Optional<ParseResult> MakeForLoopStatement(
ParseResultIterator* child_results) {
auto var_decl = child_results->NextAs<base::Optional<Statement*>>();
@@ -1098,7 +1124,7 @@ base::Optional<ParseResult> MakeForLoopStatement(
base::Optional<ParseResult> MakeLabelBlock(ParseResultIterator* child_results) {
auto label = child_results->NextAs<Identifier*>();
if (!IsUpperCamelCase(label->value)) {
- NamingConventionError("Label", label->value, "UpperCamelCase");
+ NamingConventionError("Label", label, "UpperCamelCase");
}
auto parameters = child_results->NextAs<ParameterList>();
auto body = child_results->NextAs<Statement*>();
@@ -1114,19 +1140,11 @@ base::Optional<ParseResult> MakeCatchBlock(ParseResultIterator* child_results) {
}
ParameterList parameters;
parameters.names.push_back(MakeNode<Identifier>(variable));
- parameters.types.push_back(
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, "Object"));
+ parameters.types.push_back(MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, "Object", std::vector<TypeExpression*>{}));
parameters.has_varargs = false;
- LabelBlock* result = MakeNode<LabelBlock>(MakeNode<Identifier>("_catch"),
- std::move(parameters), body);
- return ParseResult{result};
-}
-
-base::Optional<ParseResult> MakeRangeExpression(
- ParseResultIterator* child_results) {
- auto begin = child_results->NextAs<base::Optional<Expression*>>();
- auto end = child_results->NextAs<base::Optional<Expression*>>();
- RangeExpression result = {begin, end};
+ LabelBlock* result = MakeNode<LabelBlock>(
+ MakeNode<Identifier>(kCatchLabelName), std::move(parameters), body);
return ParseResult{result};
}
@@ -1149,6 +1167,17 @@ base::Optional<ParseResult> MakeIdentifierFromMatchedInput(
MakeNode<Identifier>(child_results->matched_input().ToString())};
}
+base::Optional<ParseResult> MakeRightShiftIdentifier(
+ ParseResultIterator* child_results) {
+ std::string str = child_results->matched_input().ToString();
+ for (auto character : str) {
+ if (character != '>') {
+ ReportError("right-shift operators may not contain any whitespace");
+ }
+ }
+ return ParseResult{MakeNode<Identifier>(str)};
+}
+
base::Optional<ParseResult> MakeIdentifierExpression(
ParseResultIterator* child_results) {
auto namespace_qualification =
@@ -1265,7 +1294,7 @@ base::Optional<ParseResult> MakeLabelAndTypes(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<Identifier*>();
if (!IsUpperCamelCase(name->value)) {
- NamingConventionError("Label", name->value, "UpperCamelCase");
+ NamingConventionError("Label", name, "UpperCamelCase");
}
auto types = child_results->NextAs<std::vector<TypeExpression*>>();
return ParseResult{LabelAndTypes{name, std::move(types)}};
@@ -1357,9 +1386,12 @@ struct TorqueGrammar : Grammar {
}
static bool MatchIdentifier(InputPosition* pos) {
- if (!MatchChar(std::isalpha, pos)) return false;
- while (MatchChar(std::isalnum, pos) || MatchString("_", pos)) {
+ InputPosition current = *pos;
+ MatchString("_", &current);
+ if (!MatchChar(std::isalpha, &current)) return false;
+ while (MatchChar(std::isalnum, &current) || MatchString("_", &current)) {
}
+ *pos = current;
return true;
}
@@ -1476,7 +1508,9 @@ struct TorqueGrammar : Grammar {
Symbol simpleType = {
Rule({Token("("), &type, Token(")")}),
Rule({List<std::string>(Sequence({&identifier, Token("::")})),
- CheckIf(Token("constexpr")), &identifier},
+ CheckIf(Token("constexpr")), &identifier,
+ TryOrDefault<std::vector<TypeExpression*>>(
+ &genericSpecializationTypeList)},
MakeBasicTypeExpression),
Rule({Token("builtin"), Token("("), typeList, Token(")"), Token("=>"),
&simpleType},
@@ -1501,20 +1535,22 @@ struct TorqueGrammar : Grammar {
// Result: base::Optional<TypeList>
Symbol* optionalGenericParameters = Optional<TypeList>(&genericParameters);
+ Symbol implicitParameterList{
+ Rule({Token("("), OneOf({"implicit", "js-implicit"}),
+ List<NameAndTypeExpression>(&nameAndType, Token(",")), Token(")")},
+ MakeImplicitParameterList)};
+
Symbol* optionalImplicitParameterList{
- TryOrDefault<std::vector<NameAndTypeExpression>>(
- Sequence({Token("("), Token("implicit"),
- List<NameAndTypeExpression>(&nameAndType, Token(",")),
- Token(")")}))};
+ Optional<ImplicitParameters>(&implicitParameterList)};
// Result: ParameterList
Symbol typeListMaybeVarArgs = {
Rule({optionalImplicitParameterList, Token("("),
List<TypeExpression*>(Sequence({&type, Token(",")})), Token("..."),
Token(")")},
- MakeParameterListFromTypes<true>),
+ MakeParameterList<true, false>),
Rule({optionalImplicitParameterList, Token("("), typeList, Token(")")},
- MakeParameterListFromTypes<false>)};
+ MakeParameterList<false, false>)};
// Result: LabelAndTypes
Symbol labelParameter = {Rule(
@@ -1561,15 +1597,15 @@ struct TorqueGrammar : Grammar {
Symbol parameterListNoVararg = {
Rule({optionalImplicitParameterList, Token("("),
List<NameAndTypeExpression>(&nameAndType, Token(",")), Token(")")},
- MakeParameterListFromNameAndTypeList<false>)};
+ MakeParameterList<false, true>)};
// Result: ParameterList
Symbol parameterListAllowVararg = {
Rule({&parameterListNoVararg}),
Rule({optionalImplicitParameterList, Token("("),
- NonemptyList<NameAndTypeExpression>(&nameAndType, Token(",")),
- Token(","), Token("..."), &identifier, Token(")")},
- MakeParameterListFromNameAndTypeList<true>)};
+ List<NameAndTypeExpression>(Sequence({&nameAndType, Token(",")})),
+ Token("..."), &identifier, Token(")")},
+ MakeParameterList<true, true>)};
// Result: Identifier*
Symbol* OneOf(const std::vector<std::string>& alternatives) {
@@ -1672,9 +1708,14 @@ struct TorqueGrammar : Grammar {
Symbol* additiveExpression =
BinaryOperator(multiplicativeExpression, OneOf({"+", "-"}));
+ // Result: Identifier*
+ Symbol shiftOperator = {
+ Rule({Token("<<")}, MakeIdentifierFromMatchedInput),
+ Rule({Token(">"), Token(">")}, MakeRightShiftIdentifier),
+ Rule({Token(">"), Token(">"), Token(">")}, MakeRightShiftIdentifier)};
+
// Result: Expression*
- Symbol* shiftExpression =
- BinaryOperator(additiveExpression, OneOf({"<<", ">>", ">>>"}));
+ Symbol* shiftExpression = BinaryOperator(additiveExpression, &shiftOperator);
// Do not allow expressions like a < b > c because this is never
// useful and is ambiguous with template parameters.
@@ -1742,12 +1783,6 @@ struct TorqueGrammar : Grammar {
// Result: ExpressionWithSource
Symbol expressionWithSource = {Rule({expression}, MakeExpressionWithSource)};
- // Result: RangeExpression
- Symbol rangeSpecifier = {
- Rule({Token("["), Optional<Expression*>(expression), Token(":"),
- Optional<Expression*>(expression), Token("]")},
- MakeRangeExpression)};
-
Symbol* optionalTypeSpecifier =
Optional<TypeExpression*>(Sequence({Token(":"), &type}));
@@ -1800,9 +1835,6 @@ struct TorqueGrammar : Grammar {
MakeAssertStatement),
Rule({Token("while"), Token("("), expression, Token(")"), &statement},
MakeWhileStatement),
- Rule({Token("for"), Token("("), &varDeclaration, Token("of"), expression,
- Optional<RangeExpression>(&rangeSpecifier), Token(")"), &statement},
- MakeForOfLoopStatement),
Rule({Token("for"), Token("("),
Optional<Statement*>(&varDeclarationWithInitialization), Token(";"),
Optional<Expression*>(expression), Token(";"),
@@ -1845,7 +1877,9 @@ struct TorqueGrammar : Grammar {
Token("{"), List<Declaration*>(&method),
List<ClassFieldExpression>(&classField), Token("}")},
AsSingletonVector<Declaration*, MakeClassDeclaration>()),
- Rule({Token("struct"), &name, Token("{"), List<Declaration*>(&method),
+ Rule({Token("struct"), &name,
+ TryOrDefault<GenericParameters>(&genericParameters), Token("{"),
+ List<Declaration*>(&method),
List<StructFieldExpression>(&structField), Token("}")},
AsSingletonVector<Declaration*, MakeStructDeclaration>()),
Rule({CheckIf(Token("transient")), Token("type"), &name,
@@ -1909,7 +1943,9 @@ struct TorqueGrammar : Grammar {
Token("}")},
AsSingletonVector<Declaration*, MakeNamespaceDeclaration>())};
- Symbol file = {Rule({&file, &namespaceDeclaration}, AddGlobalDeclarations),
+ Symbol file = {Rule({&file, Token("import"), &externalString},
+ ProcessTorqueImportDeclaration),
+ Rule({&file, &namespaceDeclaration}, AddGlobalDeclarations),
Rule({&file, &declaration}, AddGlobalDeclarations), Rule({})};
};
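
With the shift operators now assembled from individual '>' tokens, MakeRightShiftIdentifier has to reject spellings where whitespace separates them, so that 'a >> b' stays distinct from '>' tokens that merely close nested generic argument lists. A self-contained sketch of that check; the real version reports a Torque error instead of returning false, and the helper name here is illustrative.

    #include <cassert>
    #include <string>

    bool IsValidRightShiftSpelling(const std::string& matched_input) {
      for (char character : matched_input) {
        if (character != '>') return false;  // would trigger ReportError(...)
      }
      return true;
    }

    int main() {
      assert(IsValidRightShiftSpelling(">>"));
      assert(IsValidRightShiftSpelling(">>>"));
      // "> >" arises when the '>' tokens close generic arguments instead;
      // rejecting the spaced form keeps shift operators unambiguous.
      assert(!IsValidRightShiftSpelling("> >"));
    }
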
diff --git a/deps/v8/src/torque/torque.cc b/deps/v8/src/torque/torque.cc
index 6b596aab39..e759ce613c 100644
--- a/deps/v8/src/torque/torque.cc
+++ b/deps/v8/src/torque/torque.cc
@@ -20,21 +20,29 @@ std::string ErrorPrefixFor(TorqueMessage::Kind kind) {
int WrappedMain(int argc, const char** argv) {
std::string output_directory;
+ std::string v8_root;
std::vector<std::string> files;
for (int i = 1; i < argc; ++i) {
// Check for options
- if (!strcmp("-o", argv[i])) {
+ if (std::string(argv[i]) == "-o") {
output_directory = argv[++i];
- continue;
+ } else if (std::string(argv[i]) == "-v8-root") {
+ v8_root = std::string(argv[++i]);
+ } else {
+ // Otherwise it's a .tq file. Remember it for compilation.
+ files.emplace_back(argv[i]);
+ if (!StringEndsWith(files.back(), ".tq")) {
+ std::cerr << "Unexpected command-line argument \"" << files.back()
+ << "\", expected a .tq file.\n";
+ base::OS::Abort();
+ }
}
-
- // Otherwise it's a .tq file. Remember it for compilation.
- files.emplace_back(argv[i]);
}
TorqueCompilerOptions options;
- options.output_directory = output_directory;
+ options.output_directory = std::move(output_directory);
+ options.v8_root = std::move(v8_root);
options.collect_language_server_data = false;
options.force_assert_statements = false;
@@ -42,7 +50,7 @@ int WrappedMain(int argc, const char** argv) {
// PositionAsString requires the SourceFileMap to be set to
// resolve the file name. Needed to report errors and lint warnings.
- SourceFileMap::Scope source_file_map_scope(result.source_file_map);
+ SourceFileMap::Scope source_file_map_scope(*result.source_file_map);
for (const TorqueMessage& message : result.messages) {
if (message.position) {
diff --git a/deps/v8/src/torque/type-oracle.cc b/deps/v8/src/torque/type-oracle.cc
index 7c266a419a..47331543fc 100644
--- a/deps/v8/src/torque/type-oracle.cc
+++ b/deps/v8/src/torque/type-oracle.cc
@@ -11,8 +11,14 @@ namespace torque {
DEFINE_CONTEXTUAL_VARIABLE(TypeOracle)
// static
-void TypeOracle::FinalizeClassTypes() {
- for (const std::unique_ptr<AggregateType>& p : Get().struct_types_) {
+const std::vector<std::unique_ptr<AggregateType>>*
+TypeOracle::GetAggregateTypes() {
+ return &Get().aggregate_types_;
+}
+
+// static
+void TypeOracle::FinalizeAggregateTypes() {
+ for (const std::unique_ptr<AggregateType>& p : Get().aggregate_types_) {
p->Finalize();
}
}
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index c9d6bb0bf3..405cb41e75 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -32,7 +32,7 @@ class TypeOracle : public ContextualClass<TypeOracle> {
static StructType* GetStructType(const std::string& name) {
StructType* result = new StructType(CurrentNamespace(), name);
- Get().struct_types_.push_back(std::unique_ptr<StructType>(result));
+ Get().aggregate_types_.push_back(std::unique_ptr<StructType>(result));
return result;
}
@@ -42,7 +42,7 @@ class TypeOracle : public ContextualClass<TypeOracle> {
const TypeAlias* alias) {
ClassType* result = new ClassType(parent, CurrentNamespace(), name, flags,
generates, decl, alias);
- Get().struct_types_.push_back(std::unique_ptr<ClassType>(result));
+ Get().aggregate_types_.push_back(std::unique_ptr<ClassType>(result));
return result;
}
@@ -107,6 +107,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(CONSTEXPR_INTPTR_TYPE_STRING);
}
+ static const Type* GetConstexprInstanceTypeType() {
+ return Get().GetBuiltinType(CONSTEXPR_INSTANCE_TYPE_TYPE_STRING);
+ }
+
static const Type* GetVoidType() {
return Get().GetBuiltinType(VOID_TYPE_STRING);
}
@@ -135,6 +139,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(TAGGED_TYPE_STRING);
}
+ static const Type* GetUninitializedType() {
+ return Get().GetBuiltinType(UNINITIALIZED_TYPE_STRING);
+ }
+
static const Type* GetSmiType() {
return Get().GetBuiltinType(SMI_TYPE_STRING);
}
@@ -203,11 +211,19 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(CONST_INT32_TYPE_STRING);
}
+ static const Type* GetContextType() {
+ return Get().GetBuiltinType(CONTEXT_TYPE_STRING);
+ }
+
+ static const Type* GetJSFunctionType() {
+ return Get().GetBuiltinType(JS_FUNCTION_TYPE_STRING);
+ }
+
static bool IsImplicitlyConvertableFrom(const Type* to, const Type* from) {
for (Generic* from_constexpr :
Declarations::LookupGeneric(kFromConstexprMacroName)) {
- if (base::Optional<Callable*> specialization =
- from_constexpr->GetSpecialization({to, from})) {
+ if (base::Optional<const Callable*> specialization =
+ from_constexpr->specializations().Get({to, from})) {
if ((*specialization)->signature().GetExplicitTypes() ==
TypeVector{from}) {
return true;
@@ -217,7 +233,9 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return false;
}
- static void FinalizeClassTypes();
+ static const std::vector<std::unique_ptr<AggregateType>>* GetAggregateTypes();
+
+ static void FinalizeAggregateTypes();
private:
const Type* GetBuiltinType(const std::string& name) {
@@ -229,7 +247,7 @@ class TypeOracle : public ContextualClass<TypeOracle> {
Deduplicator<UnionType> union_types_;
Deduplicator<ReferenceType> reference_types_;
std::vector<std::unique_ptr<Type>> nominal_types_;
- std::vector<std::unique_ptr<AggregateType>> struct_types_;
+ std::vector<std::unique_ptr<AggregateType>> aggregate_types_;
std::vector<std::unique_ptr<Type>> top_types_;
};
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index e9fd50c02a..37be0df006 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -57,6 +57,12 @@ const AbstractType* TypeVisitor::ComputeType(AbstractTypeDeclaration* decl) {
const Type* parent_type = nullptr;
if (decl->extends) {
parent_type = Declarations::LookupType(*decl->extends);
+ if (parent_type->IsUnionType()) {
+ // UnionType::IsSupertypeOf requires that types can only extend from non-
+ // union types in order to work correctly.
+ ReportError("type \"", decl->name->value,
+ "\" cannot extend a type union");
+ }
}
if (generates == "" && parent_type) {
@@ -104,9 +110,25 @@ void DeclareMethods(AggregateType* container_type,
}
}
+namespace {
+std::string ComputeStructName(StructDeclaration* decl) {
+ TypeVector args;
+ if (decl->IsGeneric()) {
+ args.resize(decl->generic_parameters.size());
+ std::transform(
+ decl->generic_parameters.begin(), decl->generic_parameters.end(),
+ args.begin(), [](Identifier* parameter) {
+ return Declarations::LookupTypeAlias(QualifiedName(parameter->value))
+ ->type();
+ });
+ }
+ return StructType::ComputeName(decl->name->value, args);
+}
+} // namespace
+
const StructType* TypeVisitor::ComputeType(StructDeclaration* decl) {
CurrentSourcePosition::Scope position_activator(decl->pos);
- StructType* struct_type = TypeOracle::GetStructType(decl->name->value);
+ StructType* struct_type = TypeOracle::GetStructType(ComputeStructName(decl));
size_t offset = 0;
for (auto& field : decl->fields) {
CurrentSourcePosition::Scope position_activator(
@@ -156,28 +178,78 @@ const ClassType* TypeVisitor::ComputeType(ClassDeclaration* decl) {
new_class = TypeOracle::GetClassType(super_type, decl->name->value,
decl->flags, generates, decl, alias);
} else {
- if (decl->super) {
- ReportError("Only extern classes can inherit.");
+ if (!decl->super) {
+ ReportError("Intern class ", decl->name->value,
+ " must extend class Struct.");
+ }
+ const Type* super_type = TypeVisitor::ComputeType(*decl->super);
+ const ClassType* super_class = ClassType::DynamicCast(super_type);
+ const Type* struct_type = Declarations::LookupGlobalType("Struct");
+ if (!super_class || super_class != struct_type) {
+ ReportError("Intern class ", decl->name->value,
+ " must extend class Struct.");
}
if (decl->generates) {
ReportError("Only extern classes can specify a generated type.");
}
- new_class =
- TypeOracle::GetClassType(TypeOracle::GetTaggedType(), decl->name->value,
- decl->flags, "FixedArray", decl, alias);
+ new_class = TypeOracle::GetClassType(
+ super_type, decl->name->value,
+ decl->flags | ClassFlag::kGeneratePrint | ClassFlag::kGenerateVerify,
+ decl->name->value, decl, alias);
}
return new_class;
}
const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
if (auto* basic = BasicTypeExpression::DynamicCast(type_expression)) {
- const TypeAlias* alias = Declarations::LookupTypeAlias(
- QualifiedName{basic->namespace_qualification, basic->name});
+ QualifiedName qualified_name{basic->namespace_qualification, basic->name};
+ auto& args = basic->generic_arguments;
+ const Type* type;
+ SourcePosition pos = SourcePosition::Invalid();
+
+ if (args.empty()) {
+ auto* alias = Declarations::LookupTypeAlias(qualified_name);
+ type = alias->type();
+ pos = alias->GetDeclarationPosition();
+ } else {
+ auto* generic_struct =
+ Declarations::LookupUniqueGenericStructType(qualified_name);
+ auto& params = generic_struct->generic_parameters();
+ auto& specializations = generic_struct->specializations();
+ if (params.size() != args.size()) {
+ ReportError("Generic struct takes ", params.size(),
+ " parameters, but only ", args.size(), " were given");
+ }
+
+ std::vector<const Type*> arg_types = ComputeTypeVector(args);
+ if (auto specialization = specializations.Get(arg_types)) {
+ type = *specialization;
+ } else {
+ CurrentScope::Scope generic_scope(generic_struct->ParentScope());
+ // Create a temporary fake namespace just to declare the
+ // specialization aliases for the generic types to create a signature.
+ Namespace tmp_namespace("_tmp");
+ CurrentScope::Scope tmp_namespace_scope(&tmp_namespace);
+ auto arg_types_iterator = arg_types.begin();
+ for (auto param : params) {
+ TypeAlias* alias =
+ Declarations::DeclareType(param, *arg_types_iterator);
+ alias->SetIsUserDefined(false);
+ arg_types_iterator++;
+ }
+
+ auto struct_type = ComputeType(generic_struct->declaration());
+ specializations.Add(arg_types, struct_type);
+ type = struct_type;
+ }
+ pos = generic_struct->declaration()->name->pos;
+ }
+
if (GlobalContext::collect_language_server_data()) {
- LanguageServerData::AddDefinition(type_expression->pos,
- alias->GetDeclarationPosition());
+ LanguageServerData::AddDefinition(type_expression->pos, pos);
}
- return alias->type();
+ return type;
+
} else if (auto* union_type =
UnionTypeExpression::DynamicCast(type_expression)) {
return TypeOracle::GetUnionType(ComputeType(union_type->a),
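
The generic-struct path above instantiates a specialization only on a cache miss: it looks up the argument types in specializations(), and otherwise declares temporary type aliases, computes the struct type, and stores it for reuse. A minimal sketch of that memoization, with strings standing in for const Type* and the instantiation reduced to name construction analogous to StructType::ComputeName; everything else is illustrative.

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    using TypeVector = std::vector<std::string>;

    class SpecializationCache {
     public:
      std::string GetOrCreate(const std::string& basename, const TypeVector& args) {
        auto it = specializations_.find(args);
        if (it != specializations_.end()) return it->second;  // cache hit
        // Cache miss: "instantiate" the struct (here: just compute its
        // readable name, as StructType::ComputeName does) and remember it.
        std::string name = basename + "<";
        for (size_t i = 0; i < args.size(); ++i) {
          if (i > 0) name += ", ";
          name += args[i];
        }
        name += ">";
        specializations_.emplace(args, name);
        return name;
      }

     private:
      std::map<TypeVector, std::string> specializations_;
    };

    int main() {
      SpecializationCache cache;
      std::cout << cache.GetOrCreate("Box", {"Smi"}) << "\n";     // instantiates
      std::cout << cache.GetOrCreate("Box", {"Smi"}) << "\n";     // reuses
      std::cout << cache.GetOrCreate("Box", {"Object"}) << "\n";  // instantiates
    }

The mangled C++ name is derived from this readable name later by replacing '<', '>', ',' and spaces with underscores, as StructType::MangledName in types.h below shows.
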
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 1d7ca1d5f2..37a328b1dc 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -272,7 +272,25 @@ const Field& AggregateType::LookupField(const std::string& name) const {
}
std::string StructType::GetGeneratedTypeNameImpl() const {
- return "TorqueStruct" + name();
+ return "TorqueStruct" + MangledName();
+}
+
+// static
+std::string StructType::ComputeName(const std::string& basename,
+ const std::vector<const Type*>& args) {
+ if (args.size() == 0) return basename;
+ std::stringstream s;
+ s << basename << "<";
+ bool first = true;
+ for (auto t : args) {
+ if (!first) {
+ s << ", ";
+ }
+ s << t->ToString();
+ first = false;
+ }
+ s << ">";
+ return s.str();
}
std::vector<Method*> AggregateType::Methods(const std::string& name) const {
@@ -349,7 +367,7 @@ void ClassType::Finalize() const {
TypeVisitor::VisitClassFieldsAndMethods(const_cast<ClassType*>(this),
this->decl_);
is_finalized_ = true;
- if (GenerateCppClassDefinitions()) {
+ if (GenerateCppClassDefinitions() || !IsExtern()) {
for (const Field& f : fields()) {
if (f.is_weak) {
Error("Generation of C++ class for Torque class ", name(),
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index 0d79c1f405..f6180c4250 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -282,6 +282,8 @@ class V8_EXPORT_PRIVATE BuiltinPointerType final : public Type {
}
size_t function_pointer_type_id() const { return function_pointer_type_id_; }
+ std::vector<std::string> GetRuntimeTypes() const override { return {"Smi"}; }
+
private:
friend class TypeOracle;
BuiltinPointerType(const Type* parent, TypeVector parameter_types,
@@ -500,6 +502,18 @@ class StructType final : public AggregateType {
DECLARE_TYPE_BOILERPLATE(StructType)
std::string ToExplicitString() const override;
std::string GetGeneratedTypeNameImpl() const override;
+ std::string MangledName() const override {
+ // TODO(gsps): Generate more readable mangled names
+ std::string str(name());
+ std::replace(str.begin(), str.end(), ',', '_');
+ std::replace(str.begin(), str.end(), ' ', '_');
+ std::replace(str.begin(), str.end(), '<', '_');
+ std::replace(str.begin(), str.end(), '>', '_');
+ return str;
+ }
+
+ static std::string ComputeName(const std::string& basename,
+ const std::vector<const Type*>& args);
private:
friend class TypeOracle;
@@ -526,10 +540,10 @@ class ClassType final : public AggregateType {
std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsExtern() const { return flags_ & ClassFlag::kExtern; }
bool ShouldGeneratePrint() const {
- return flags_ & ClassFlag::kGeneratePrint;
+ return flags_ & ClassFlag::kGeneratePrint || !IsExtern();
}
bool ShouldGenerateVerify() const {
- return flags_ & ClassFlag::kGenerateVerify;
+ return flags_ & ClassFlag::kGenerateVerify || !IsExtern();
}
bool IsTransient() const override { return flags_ & ClassFlag::kTransient; }
bool IsAbstract() const { return flags_ & ClassFlag::kAbstract; }
@@ -540,7 +554,7 @@ class ClassType final : public AggregateType {
return flags_ & ClassFlag::kHasSameInstanceTypeAsParent;
}
bool GenerateCppClassDefinitions() const {
- return flags_ & ClassFlag::kGenerateCppClassDefinitions;
+ return flags_ & ClassFlag::kGenerateCppClassDefinitions || !IsExtern();
}
bool HasIndexedField() const override;
size_t size() const { return size_; }
@@ -606,8 +620,6 @@ class VisitResult {
base::Optional<StackRange> stack_range_;
};
-using NameValueMap = std::map<std::string, VisitResult>;
-
VisitResult ProjectStructField(VisitResult structure,
const std::string& fieldname);
@@ -669,6 +681,7 @@ struct Signature {
base::Optional<std::string> arguments_variable;
ParameterTypes parameter_types;
size_t implicit_count;
+ size_t ExplicitCount() const { return types().size() - implicit_count; }
const Type* return_type;
LabelDeclarationVector labels;
bool HasSameTypesAs(
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index 3e2f715f0d..244d1587db 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -123,12 +123,6 @@ std::string CurrentPositionAsString() {
return PositionAsString(CurrentSourcePosition::Get());
}
-void NamingConventionError(const std::string& type, const std::string& name,
- const std::string& convention) {
- Lint(type, " \"", name, "\" does not follow \"", convention,
- "\" naming convention.");
-}
-
MessageBuilder::MessageBuilder(const std::string& message,
TorqueMessage::Kind kind) {
base::Optional<SourcePosition> position;
@@ -162,7 +156,7 @@ bool ContainsUpperCase(const std::string& s) {
// keywords, e.g.: 'True', 'Undefined', etc.
// These do not need to follow the default naming convention for constants.
bool IsKeywordLikeName(const std::string& s) {
- static const char* const keyword_like_constants[]{"True", "False", "Hole",
+ static const char* const keyword_like_constants[]{"True", "False", "TheHole",
"Null", "Undefined"};
return std::find(std::begin(keyword_like_constants),
@@ -186,12 +180,16 @@ bool IsMachineType(const std::string& s) {
bool IsLowerCamelCase(const std::string& s) {
if (s.empty()) return false;
- return islower(s[0]) && !ContainsUnderscore(s);
+ size_t start = 0;
+ if (s[0] == '_') start = 1;
+ return islower(s[start]) && !ContainsUnderscore(s.substr(start));
}
bool IsUpperCamelCase(const std::string& s) {
if (s.empty()) return false;
- return isupper(s[0]) && !ContainsUnderscore(s);
+ size_t start = 0;
+ if (s[0] == '_') start = 1;
+ return isupper(s[start]) && !ContainsUnderscore(s.substr(1));
}
bool IsSnakeCase(const std::string& s) {
@@ -248,12 +246,34 @@ std::string CamelifyString(const std::string& underscore_string) {
return result;
}
+std::string SnakeifyString(const std::string& camel_string) {
+ std::string result;
+ bool previousWasLower = false;
+ for (auto current : camel_string) {
+ if (previousWasLower && isupper(current)) {
+ result += "_";
+ }
+ result += tolower(current);
+ previousWasLower = (islower(current));
+ }
+ return result;
+}
+
std::string DashifyString(const std::string& underscore_string) {
std::string result = underscore_string;
std::replace(result.begin(), result.end(), '_', '-');
return result;
}
+std::string UnderlinifyPath(std::string path) {
+ std::replace(path.begin(), path.end(), '-', '_');
+ std::replace(path.begin(), path.end(), '/', '_');
+ std::replace(path.begin(), path.end(), '\\', '_');
+ std::replace(path.begin(), path.end(), '.', '_');
+ transform(path.begin(), path.end(), path.begin(), ::toupper);
+ return path;
+}
+
void ReplaceFileContentsIfDifferent(const std::string& file_path,
const std::string& contents) {
std::ifstream old_contents_stream(file_path.c_str());
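As a rough illustration of the two helpers added above, here is a self-contained sketch with hypothetical inputs; the real functions live in torque/utils.cc:

#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

// Minimal re-implementation of SnakeifyString / UnderlinifyPath, for
// illustration only.
std::string Snakeify(const std::string& camel) {
  std::string result;
  bool previous_was_lower = false;
  for (char c : camel) {
    if (previous_was_lower && isupper(c)) result += '_';
    result += tolower(c);
    previous_was_lower = islower(c);
  }
  return result;
}

std::string Underlinify(std::string path) {
  std::replace(path.begin(), path.end(), '-', '_');
  std::replace(path.begin(), path.end(), '/', '_');
  std::replace(path.begin(), path.end(), '\\', '_');
  std::replace(path.begin(), path.end(), '.', '_');
  std::transform(path.begin(), path.end(), path.begin(),
                 [](unsigned char c) { return static_cast<char>(std::toupper(c)); });
  return path;
}

int main() {
  std::cout << Snakeify("FindIndex") << "\n";                          // find_index
  std::cout << Underlinify("src/builtins/array-foreach.tq") << "\n";   // SRC_BUILTINS_ARRAY_FOREACH_TQ
}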
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index 10b91ce7d4..fb4ad59f99 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -81,11 +81,6 @@ MessageBuilder Lint(Args&&... args) {
return Message(TorqueMessage::Kind::kLint, std::forward<Args>(args)...);
}
-// Report a LintError with the format "{type} '{name}' doesn't follow
-// '{convention}' naming convention".
-void NamingConventionError(const std::string& type, const std::string& name,
- const std::string& convention);
-
bool IsLowerCamelCase(const std::string& s);
bool IsUpperCamelCase(const std::string& s);
bool IsSnakeCase(const std::string& s);
@@ -99,7 +94,9 @@ template <class... Args>
std::string CapifyStringWithUnderscores(const std::string& camellified_string);
std::string CamelifyString(const std::string& underscore_string);
+std::string SnakeifyString(const std::string& camel_string);
std::string DashifyString(const std::string& underscore_string);
+std::string UnderlinifyPath(std::string path);
void ReplaceFileContentsIfDifferent(const std::string& file_path,
const std::string& contents);
@@ -350,6 +347,15 @@ class NullOStream : public std::ostream {
NullStreambuf buffer_;
};
+inline bool StringStartsWith(const std::string& s, const std::string& prefix) {
+ if (s.size() < prefix.size()) return false;
+ return s.substr(0, prefix.size()) == prefix;
+}
+inline bool StringEndsWith(const std::string& s, const std::string& suffix) {
+ if (s.size() < suffix.size()) return false;
+ return s.substr(s.size() - suffix.size()) == suffix;
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
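The two inline helpers above are plain prefix/suffix tests; a trivial usage sketch, assuming torque/utils.h is included and we are inside the v8::internal::torque namespace (the file name below is arbitrary):

#include <cassert>
#include <string>

void PrefixSuffixExample() {
  const std::string name = "array-foreach.tq";
  assert(StringStartsWith(name, "array-"));    // prefix matches
  assert(StringEndsWith(name, ".tq"));         // suffix matches
  assert(!StringStartsWith(name, "builtin"));  // prefix does not match
}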
diff --git a/deps/v8/src/tracing/OWNERS b/deps/v8/src/tracing/OWNERS
index 6afd4d0fee..7ab7c063da 100644
--- a/deps/v8/src/tracing/OWNERS
+++ b/deps/v8/src/tracing/OWNERS
@@ -1,2 +1,4 @@
alph@chromium.org
petermarshall@chromium.org
+
+# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index b5d2e7d866..53839ba4b1 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -32,7 +32,9 @@ enum CategoryGroupEnabledFlags {
kEnabledForETWExport_CategoryGroupEnabledFlags = 1 << 3,
};
-// By default, const char* asrgument values are assumed to have long-lived scope
+// TODO(petermarshall): Remove with the old tracing implementation - Perfetto
+// copies const char* arguments by default.
+// By default, const char* argument values are assumed to have long-lived scope
// and will not be copied. Use this macro to force a const char* to be copied.
#define TRACE_STR_COPY(str) v8::internal::tracing::TraceStringWithCopy(str)
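A hedged usage sketch of when the copy matters: the argument is a short-lived stack buffer, so the tracer must copy it rather than retain the pointer. TRACE_EVENT1 is the usual macro from this header; the category, event and argument names below are invented:

#include <cstdio>

// Assumes "src/tracing/trace-event.h" is included. Hypothetical call site.
void TraceModuleName(int module_id) {
  char name[32];
  std::snprintf(name, sizeof(name), "module-%d", module_id);
  // `name` lives on this stack frame, so without TRACE_STR_COPY the old
  // (non-Perfetto) backend would store a dangling pointer.
  TRACE_EVENT1("v8.wasm", "ModuleCompiled", "name", TRACE_STR_COPY(name));
}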
diff --git a/deps/v8/src/trap-handler/OWNERS b/deps/v8/src/trap-handler/OWNERS
index ac0d46af0e..f6f3bc07ec 100644
--- a/deps/v8/src/trap-handler/OWNERS
+++ b/deps/v8/src/trap-handler/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
titzer@chromium.org
ahaas@chromium.org
diff --git a/deps/v8/src/utils/OWNERS b/deps/v8/src/utils/OWNERS
index 852d438bb0..3f9de7e204 100644
--- a/deps/v8/src/utils/OWNERS
+++ b/deps/v8/src/utils/OWNERS
@@ -1 +1,3 @@
file://COMMON_OWNERS
+
+# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index 27db17a479..af32e90088 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -84,7 +84,7 @@ v8::PageAllocator* SetPlatformPageAllocatorForTesting(
return old_page_allocator;
}
-void* Malloced::New(size_t size) {
+void* Malloced::operator new(size_t size) {
void* result = AllocWithRetry(size);
if (result == nullptr) {
V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
@@ -92,7 +92,7 @@ void* Malloced::New(size_t size) {
return result;
}
-void Malloced::Delete(void* p) { free(p); }
+void Malloced::operator delete(void* p) { free(p); }
char* StrDup(const char* str) {
size_t length = strlen(str);
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index fa3e6f3d7d..2f7074acb0 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -29,11 +29,8 @@ class Isolate;
// Superclass for classes managed with new & delete.
class V8_EXPORT_PRIVATE Malloced {
public:
- void* operator new(size_t size) { return New(size); }
- void operator delete(void* p) { Delete(p); }
-
- static void* New(size_t size);
- static void Delete(void* p);
+ static void* operator new(size_t size);
+ static void operator delete(void* p);
};
template <typename T>
@@ -70,8 +67,8 @@ char* StrNDup(const char* str, int n);
// and free. Used as the default policy for lists.
class FreeStoreAllocationPolicy {
public:
- V8_INLINE void* New(size_t size) { return Malloced::New(size); }
- V8_INLINE static void Delete(void* p) { Malloced::Delete(p); }
+ V8_INLINE void* New(size_t size) { return Malloced::operator new(size); }
+ V8_INLINE static void Delete(void* p) { Malloced::operator delete(p); }
};
// Performs a malloc, with retry logic on failure. Returns nullptr on failure.
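With operator new and operator delete now defined directly on Malloced, any derived class allocates through the retrying allocator and frees with free(). A minimal sketch with a made-up class, assuming allocation.h is included:

// Sketch only: ExampleBuffer is hypothetical; it simply inherits the
// class-level allocation functions declared on Malloced above.
class ExampleBuffer : public Malloced {
 public:
  explicit ExampleBuffer(size_t size) : size_(size) {}
  size_t size() const { return size_; }

 private:
  size_t size_;
};

void UseExampleBuffer() {
  ExampleBuffer* buf = new ExampleBuffer(64);  // Malloced::operator new
  delete buf;                                  // Malloced::operator delete
}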
diff --git a/deps/v8/src/utils/splay-tree-inl.h b/deps/v8/src/utils/splay-tree-inl.h
deleted file mode 100644
index bda453fd8f..0000000000
--- a/deps/v8/src/utils/splay-tree-inl.h
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_UTILS_SPLAY_TREE_INL_H_
-#define V8_UTILS_SPLAY_TREE_INL_H_
-
-#include <vector>
-
-#include "src/utils/splay-tree.h"
-
-namespace v8 {
-namespace internal {
-
-
-template<typename Config, class Allocator>
-SplayTree<Config, Allocator>::~SplayTree() {
- NodeDeleter deleter;
- ForEachNode(&deleter);
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Insert(const Key& key,
- Locator* locator) {
- if (is_empty()) {
- // If the tree is empty, insert the new node.
- root_ = new(allocator_) Node(key, Config::NoValue());
- } else {
- // Splay on the key to move the last node on the search path
- // for the key to the root of the tree.
- Splay(key);
- // Ignore repeated insertions with the same key.
- int cmp = Config::Compare(key, root_->key_);
- if (cmp == 0) {
- locator->bind(root_);
- return false;
- }
- // Insert the new node.
- Node* node = new(allocator_) Node(key, Config::NoValue());
- InsertInternal(cmp, node);
- }
- locator->bind(root_);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::InsertInternal(int cmp, Node* node) {
- if (cmp > 0) {
- node->left_ = root_;
- node->right_ = root_->right_;
- root_->right_ = nullptr;
- } else {
- node->right_ = root_;
- node->left_ = root_->left_;
- root_->left_ = nullptr;
- }
- root_ = node;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindInternal(const Key& key) {
- if (is_empty())
- return false;
- Splay(key);
- return Config::Compare(key, root_->key_) == 0;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Contains(const Key& key) {
- return FindInternal(key);
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
- if (FindInternal(key)) {
- locator->bind(root_);
- return true;
- } else {
- return false;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindGreatestLessThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the greatest node in
- // the left subtree.
- int cmp = Config::Compare(root_->key_, key);
- if (cmp <= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->left_;
- bool result = FindGreatest(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindLeastGreaterThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the least node in
- // the right subtree.
- int cmp = Config::Compare(root_->key_, key);
- if (cmp >= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->right_;
- bool result = FindLeast(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindGreatest(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->right_ != nullptr) current = current->right_;
- locator->bind(current);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindLeast(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->left_ != nullptr) current = current->left_;
- locator->bind(current);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Move(const Key& old_key,
- const Key& new_key) {
- if (!FindInternal(old_key))
- return false;
- Node* node_to_move = root_;
- RemoveRootNode(old_key);
- Splay(new_key);
- int cmp = Config::Compare(new_key, root_->key_);
- if (cmp == 0) {
- // A node with the target key already exists.
- delete node_to_move;
- return false;
- }
- node_to_move->key_ = new_key;
- InsertInternal(cmp, node_to_move);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Remove(const Key& key) {
- if (!FindInternal(key))
- return false;
- Node* node_to_remove = root_;
- RemoveRootNode(key);
- delete node_to_remove;
- return true;
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::RemoveRootNode(const Key& key) {
- if (root_->left_ == nullptr) {
- // No left child, so the new tree is just the right child.
- root_ = root_->right_;
- } else {
- // Left child exists.
- Node* right = root_->right_;
- // Make the original left child the new root.
- root_ = root_->left_;
- // Splay to make sure that the new root has an empty right child.
- Splay(key);
- // Insert the original right child as the right child of the new
- // root.
- root_->right_ = right;
- }
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::Splay(const Key& key) {
- if (is_empty())
- return;
- Node dummy_node(Config::kNoKey, Config::NoValue());
- // Create a dummy node. The use of the dummy node is a bit
- // counter-intuitive: The right child of the dummy node will hold
- // the L tree of the algorithm. The left child of the dummy node
- // will hold the R tree of the algorithm. Using a dummy node, left
- // and right will always be nodes and we avoid special cases.
- Node* dummy = &dummy_node;
- Node* left = dummy;
- Node* right = dummy;
- Node* current = root_;
- while (true) {
- int cmp = Config::Compare(key, current->key_);
- if (cmp < 0) {
- if (current->left_ == nullptr) break;
- if (Config::Compare(key, current->left_->key_) < 0) {
- // Rotate right.
- Node* temp = current->left_;
- current->left_ = temp->right_;
- temp->right_ = current;
- current = temp;
- if (current->left_ == nullptr) break;
- }
- // Link right.
- right->left_ = current;
- right = current;
- current = current->left_;
- } else if (cmp > 0) {
- if (current->right_ == nullptr) break;
- if (Config::Compare(key, current->right_->key_) > 0) {
- // Rotate left.
- Node* temp = current->right_;
- current->right_ = temp->left_;
- temp->left_ = current;
- current = temp;
- if (current->right_ == nullptr) break;
- }
- // Link left.
- left->right_ = current;
- left = current;
- current = current->right_;
- } else {
- break;
- }
- }
- // Assemble.
- left->right_ = current->left_;
- right->left_ = current->right_;
- current->left_ = dummy->right_;
- current->right_ = dummy->left_;
- root_ = current;
-}
-
-
-template <typename Config, class Allocator> template <class Callback>
-void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
- NodeToPairAdaptor<Callback> callback_adaptor(callback);
- ForEachNode(&callback_adaptor);
-}
-
-
-template <typename Config, class Allocator> template <class Callback>
-void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
- if (root_ == nullptr) return;
- // Pre-allocate some space for tiny trees.
- std::vector<Node*> nodes_to_visit;
- nodes_to_visit.push_back(root_);
- size_t pos = 0;
- while (pos < nodes_to_visit.size()) {
- Node* node = nodes_to_visit[pos++];
- if (node->left() != nullptr) nodes_to_visit.push_back(node->left());
- if (node->right() != nullptr) nodes_to_visit.push_back(node->right());
- callback->Call(node);
- }
-}
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_UTILS_SPLAY_TREE_INL_H_
diff --git a/deps/v8/src/utils/splay-tree.h b/deps/v8/src/utils/splay-tree.h
deleted file mode 100644
index 47633f39db..0000000000
--- a/deps/v8/src/utils/splay-tree.h
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_UTILS_SPLAY_TREE_H_
-#define V8_UTILS_SPLAY_TREE_H_
-
-#include "src/utils/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// A splay tree. The config type parameter encapsulates the different
-// configurations of a concrete splay tree:
-//
-// typedef Key: the key type
-// typedef Value: the value type
-// static const Key kNoKey: the dummy key used when no key is set
-// static Value kNoValue(): the dummy value used to initialize nodes
-// static int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
-//
-// The tree is also parameterized by an allocation policy
-// (Allocator). The policy is used for allocating lists in the C free
-// store or the zone; see zone.h.
-
-// Forward defined as
-// template <typename Config, class Allocator = FreeStoreAllocationPolicy>
-// class SplayTree;
-template <typename Config, class AllocationPolicy>
-class SplayTree {
- public:
- using Key = typename Config::Key;
- using Value = typename Config::Value;
-
- class Locator;
-
- explicit SplayTree(AllocationPolicy allocator = AllocationPolicy())
- : root_(nullptr), allocator_(allocator) {}
- ~SplayTree();
-
- V8_INLINE void* operator new(
- size_t size, AllocationPolicy allocator = AllocationPolicy()) {
- return allocator.New(static_cast<int>(size));
- }
- V8_INLINE void operator delete(void* p) { AllocationPolicy::Delete(p); }
- // Please the MSVC compiler. We should never have to execute this.
- V8_INLINE void operator delete(void* p, AllocationPolicy policy) {
- UNREACHABLE();
- }
-
- AllocationPolicy allocator() { return allocator_; }
-
- // Checks if there is a mapping for the key.
- bool Contains(const Key& key);
-
- // Inserts the given key in this tree with the given value. Returns
- // true if a node was inserted, otherwise false. If found the locator
- // is enabled and provides access to the mapping for the key.
- bool Insert(const Key& key, Locator* locator);
-
- // Looks up the key in this tree and returns true if it was found,
- // otherwise false. If the node is found the locator is enabled and
- // provides access to the mapping for the key.
- bool Find(const Key& key, Locator* locator);
-
- // Finds the mapping with the greatest key less than or equal to the
- // given key.
- bool FindGreatestLessThan(const Key& key, Locator* locator);
-
- // Find the mapping with the greatest key in this tree.
- bool FindGreatest(Locator* locator);
-
- // Finds the mapping with the least key greater than or equal to the
- // given key.
- bool FindLeastGreaterThan(const Key& key, Locator* locator);
-
- // Find the mapping with the least key in this tree.
- bool FindLeast(Locator* locator);
-
- // Move the node from one key to another.
- bool Move(const Key& old_key, const Key& new_key);
-
- // Remove the node with the given key from the tree.
- bool Remove(const Key& key);
-
- // Remove all keys from the tree.
- void Clear() { ResetRoot(); }
-
- bool is_empty() { return root_ == nullptr; }
-
- // Perform the splay operation for the given key. Moves the node with
- // the given key to the top of the tree. If no node has the given
- // key, the last node on the search path is moved to the top of the
- // tree.
- void Splay(const Key& key);
-
- class Node {
- public:
- Node(const Key& key, const Value& value)
- : key_(key), value_(value), left_(nullptr), right_(nullptr) {}
-
- V8_INLINE void* operator new(size_t size, AllocationPolicy allocator) {
- return allocator.New(static_cast<int>(size));
- }
- V8_INLINE void operator delete(void* p) {
- return AllocationPolicy::Delete(p);
- }
- // Please the MSVC compiler. We should never have to execute
- // this.
- V8_INLINE void operator delete(void* p, AllocationPolicy allocator) {
- UNREACHABLE();
- }
-
- Key key() { return key_; }
- Value value() { return value_; }
- Node* left() { return left_; }
- Node* right() { return right_; }
-
- private:
- friend class SplayTree;
- friend class Locator;
- Key key_;
- Value value_;
- Node* left_;
- Node* right_;
- };
-
- // A locator provides access to a node in the tree without actually
- // exposing the node.
- class Locator {
- public:
- explicit Locator(Node* node) : node_(node) {}
- Locator() : node_(nullptr) {}
- const Key& key() { return node_->key_; }
- Value& value() { return node_->value_; }
- void set_value(const Value& value) { node_->value_ = value; }
- inline void bind(Node* node) { node_ = node; }
-
- private:
- Node* node_;
- };
-
- template <class Callback>
- void ForEach(Callback* callback);
-
- protected:
- // Resets tree root. Existing nodes become unreachable.
- void ResetRoot() { root_ = nullptr; }
-
- private:
- // Search for a node with a given key. If found, root_ points
- // to the node.
- bool FindInternal(const Key& key);
-
- // Inserts a node assuming that root_ is already set up.
- void InsertInternal(int cmp, Node* node);
-
- // Removes root_ node.
- void RemoveRootNode(const Key& key);
-
- template <class Callback>
- class NodeToPairAdaptor {
- public:
- explicit NodeToPairAdaptor(Callback* callback) : callback_(callback) {}
- void Call(Node* node) { callback_->Call(node->key(), node->value()); }
-
- private:
- Callback* callback_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeToPairAdaptor);
- };
-
- class NodeDeleter {
- public:
- NodeDeleter() = default;
- void Call(Node* node) { AllocationPolicy::Delete(node); }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(NodeDeleter);
- };
-
- template <class Callback>
- void ForEachNode(Callback* callback);
-
- Node* root_;
- AllocationPolicy allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(SplayTree);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_UTILS_SPLAY_TREE_H_
diff --git a/deps/v8/src/utils/utils.h b/deps/v8/src/utils/utils.h
index 17e07d3042..20d85aae10 100644
--- a/deps/v8/src/utils/utils.h
+++ b/deps/v8/src/utils/utils.h
@@ -777,36 +777,16 @@ inline T truncate_to_intn(T x, unsigned n) {
return (x & ((static_cast<T>(1) << n) - 1));
}
-#define INT_1_TO_63_LIST(V) \
- V(1) \
- V(2) \
- V(3) \
- V(4) \
- V(5) \
- V(6) \
- V(7) \
- V(8) \
- V(9) \
- V(10) \
- V(11) \
- V(12) \
- V(13) \
- V(14) \
- V(15) \
- V(16) \
- V(17) \
- V(18) \
- V(19) \
- V(20) \
- V(21) \
- V(22) \
- V(23) \
- V(24) \
- V(25) \
- V(26) V(27) V(28) V(29) V(30) V(31) V(32) V(33) V(34) V(35) V(36) V(37) \
- V(38) V(39) V(40) V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) \
- V(50) V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) V(60) \
- V(61) V(62) V(63)
+// clang-format off
+#define INT_1_TO_63_LIST(V) \
+ V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) V(9) V(10) \
+ V(11) V(12) V(13) V(14) V(15) V(16) V(17) V(18) V(19) V(20) \
+ V(21) V(22) V(23) V(24) V(25) V(26) V(27) V(28) V(29) V(30) \
+ V(31) V(32) V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+ V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) V(50) \
+ V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) V(60) \
+ V(61) V(62) V(63)
+// clang-format on
#define DECLARE_IS_INT_N(N) \
inline bool is_int##N(int64_t x) { return is_intn(x, N); }
@@ -875,12 +855,6 @@ class BailoutId {
int ToInt() const { return id_; }
static BailoutId None() { return BailoutId(kNoneId); }
- static BailoutId ScriptContext() { return BailoutId(kScriptContextId); }
- static BailoutId FunctionContext() { return BailoutId(kFunctionContextId); }
- static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
- static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
- static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
- static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
// Special bailout id support for deopting into the {JSConstructStub} stub.
// The following hard-coded deoptimization points are supported by the stub:
@@ -905,25 +879,10 @@ class BailoutId {
static const int kNoneId = -1;
// Using 0 could disguise errors.
- static const int kScriptContextId = 1;
- static const int kFunctionContextId = 2;
- static const int kFunctionEntryId = 3;
-
- // This AST id identifies the point after the declarations have been visited.
- // We need it to capture the environment effects of declarations that emit
- // code (function declarations).
- static const int kDeclarationsId = 4;
-
- // Every FunctionState starts with this id.
- static const int kFirstUsableId = 5;
-
- // Every compiled stub starts with this id.
- static const int kStubEntryId = 6;
-
// Builtin continuations bailout ids start here. If you need to add a
// non-builtin BailoutId, add it before this id so that this Id has the
// highest number.
- static const int kFirstBuiltinContinuationId = 7;
+ static const int kFirstBuiltinContinuationId = 1;
int id_;
};
diff --git a/deps/v8/src/utils/vector.h b/deps/v8/src/utils/vector.h
index 5b6c878e34..dd5c59e553 100644
--- a/deps/v8/src/utils/vector.h
+++ b/deps/v8/src/utils/vector.h
@@ -230,6 +230,8 @@ constexpr Vector<const uint8_t> StaticCharVector(const char (&array)[N]) {
return Vector<const uint8_t>::cast(Vector<const char>(array, N - 1));
}
+// The resulting vector does not contain a null-termination byte. If you want
+// the null byte, use ArrayVector("foo").
inline Vector<const char> CStrVector(const char* data) {
return Vector<const char>(data, strlen(data));
}
@@ -250,6 +252,9 @@ inline Vector<char> MutableCStrVector(char* data, size_t max) {
return Vector<char>(data, strnlen(data, max));
}
+// For string literals, ArrayVector("foo") returns a vector ['f', 'o', 'o', \0]
+// with length 4 and null-termination.
+// If you want ['f', 'o', 'o'], use CStrVector("foo").
template <typename T, size_t N>
inline constexpr Vector<T> ArrayVector(T (&arr)[N]) {
return Vector<T>{arr, N};
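A small sketch of the length difference the two comments above describe, assuming vector.h is included:

#include <cassert>

void LiteralVectorSizes() {
  // "foo" as a char array literal is {'f', 'o', 'o', '\0'}: four elements.
  assert(ArrayVector("foo").size() == 4);  // keeps the trailing NUL
  assert(CStrVector("foo").size() == 3);   // strlen-based, drops the NUL
}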
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index c9b1aa4d78..8aa6e24739 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
ahaas@chromium.org
bbudge@chromium.org
binji@chromium.org
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index b2cd566873..834eb181d8 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("arm " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -223,7 +221,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
int LiftoffAssembler::PrepareStackFrame() {
if (!CpuFeatures::IsSupported(ARMv7)) {
- BAILOUT("Armv6 not supported");
+ bailout(kUnsupportedArchitecture, "Armv6 not supported");
return 0;
}
uint32_t offset = static_cast<uint32_t>(pc_offset());
@@ -247,7 +245,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated.
if (bytes > KB / 2) {
- BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
+ bailout(kOtherReason,
+ "Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
#endif
@@ -750,7 +749,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
if (!CpuFeatures::IsSupported(SUDIV)) {
- BAILOUT("i32_divs");
+ bailout(kMissingCPUFeature, "i32_divs");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -778,7 +777,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
if (!CpuFeatures::IsSupported(SUDIV)) {
- BAILOUT("i32_divu");
+ bailout(kMissingCPUFeature, "i32_divu");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -793,7 +792,7 @@ void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied with SUDIV support.
- BAILOUT("i32_rems");
+ bailout(kMissingCPUFeature, "i32_rems");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -814,7 +813,7 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied with SUDIV support.
- BAILOUT("i32_remu");
+ bailout(kMissingCPUFeature, "i32_remu");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -1564,6 +1563,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index b1d71dce2f..57a157d3a7 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("arm64 " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -135,7 +133,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
if (!IsImmAddSub(bytes)) {
// Stack greater than 4M! Because this is a quite improbable case, we
// just fallback to Turbofan.
- BAILOUT("Stack too big");
+ bailout(kOtherReason, "Stack too big");
return;
}
}
@@ -144,7 +142,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated.
if (bytes > KB / 2) {
- BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
+ bailout(kOtherReason,
+ "Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
#endif
@@ -173,7 +172,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
patching_assembler.PatchSubSp(bytes);
}
-void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
+void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
@@ -1088,6 +1087,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 1b5ca87c3d..7bc3596d2e 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -14,11 +14,11 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define REQUIRE_CPU_FEATURE(name, ...) \
- if (!CpuFeatures::IsSupported(name)) { \
- bailout("no " #name); \
- return __VA_ARGS__; \
- } \
+#define REQUIRE_CPU_FEATURE(name, ...) \
+ if (!CpuFeatures::IsSupported(name)) { \
+ bailout(kMissingCPUFeature, "no " #name); \
+ return __VA_ARGS__; \
+ } \
CpuFeatureScope feature(this, name);
namespace liftoff {
@@ -1390,7 +1390,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
- assm->bailout("no SSE4.1");
+ assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 40e1636b6e..766ce71db1 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -11,8 +11,8 @@
#include "src/base/bits.h"
#include "src/base/small-vector.h"
#include "src/codegen/macro-assembler.h"
-#include "src/execution/frames.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
+#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-code-manager.h"
@@ -635,13 +635,16 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
const CacheState* cache_state() const { return &cache_state_; }
- bool did_bailout() { return bailout_reason_ != nullptr; }
- const char* bailout_reason() const { return bailout_reason_; }
+ bool did_bailout() { return bailout_reason_ != kSuccess; }
+ LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
+ const char* bailout_detail() const { return bailout_detail_; }
- void bailout(const char* reason) {
- if (bailout_reason_ != nullptr) return;
+ void bailout(LiftoffBailoutReason reason, const char* detail) {
+ DCHECK_NE(kSuccess, reason);
+ if (bailout_reason_ != kSuccess) return;
AbortCompilation();
bailout_reason_ = reason;
+ bailout_detail_ = detail;
}
private:
@@ -655,7 +658,8 @@ class LiftoffAssembler : public TurboAssembler {
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
uint32_t num_used_spill_slots_ = 0;
- const char* bailout_reason_ = nullptr;
+ LiftoffBailoutReason bailout_reason_ = kSuccess;
+ const char* bailout_detail_ = nullptr;
LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned);
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index caf00a24ca..7a87ae1a95 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -174,7 +174,8 @@ class LiftoffCompiler {
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_) {}
- bool ok() const { return ok_; }
+ bool did_bailout() const { return bailout_reason_ != kSuccess; }
+ LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
void GetCode(CodeDesc* desc) {
asm_.GetCode(nullptr, desc, &safepoint_table_builder_,
@@ -195,30 +196,51 @@ class LiftoffCompiler {
return __ GetTotalFrameSlotCount();
}
- void unsupported(FullDecoder* decoder, const char* reason) {
- ok_ = false;
- TRACE("unsupported: %s\n", reason);
+ void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason,
+ const char* detail) {
+ DCHECK_NE(kSuccess, reason);
+ if (did_bailout()) return;
+ bailout_reason_ = reason;
+ TRACE("unsupported: %s\n", detail);
decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
- reason);
+ detail);
UnuseLabels(decoder);
}
bool DidAssemblerBailout(FullDecoder* decoder) {
if (decoder->failed() || !__ did_bailout()) return false;
- unsupported(decoder, __ bailout_reason());
+ unsupported(decoder, __ bailout_reason(), __ bailout_detail());
return true;
}
+ LiftoffBailoutReason BailoutReasonForType(ValueType type) {
+ switch (type) {
+ case kWasmS128:
+ return kSimd;
+ case kWasmAnyRef:
+ case kWasmFuncRef:
+ case kWasmNullRef:
+ return kAnyRef;
+ case kWasmExnRef:
+ return kExceptionHandling;
+ case kWasmBottom:
+ return kMultiValue;
+ default:
+ return kOtherReason;
+ }
+ }
+
bool CheckSupportedType(FullDecoder* decoder,
Vector<const ValueType> supported_types,
ValueType type, const char* context) {
- char buffer[128];
// Check supported types.
for (ValueType supported : supported_types) {
if (type == supported) return true;
}
- SNPrintF(ArrayVector(buffer), "%s %s", ValueTypes::TypeName(type), context);
- unsupported(decoder, buffer);
+ LiftoffBailoutReason bailout_reason = BailoutReasonForType(type);
+ EmbeddedVector<char, 128> buffer;
+ SNPrintF(buffer, "%s %s", ValueTypes::TypeName(type), context);
+ unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
@@ -394,17 +416,17 @@ class LiftoffCompiler {
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
}
- void GenerateOutOfLineCode(OutOfLineCode& ool) {
- __ bind(ool.label.get());
- const bool is_stack_check = ool.stub == WasmCode::kWasmStackGuard;
+ void GenerateOutOfLineCode(OutOfLineCode* ool) {
+ __ bind(ool->label.get());
+ const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard;
const bool is_mem_out_of_bounds =
- ool.stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
+ ool->stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
if (is_mem_out_of_bounds && env_->use_trap_handler) {
uint32_t pc = static_cast<uint32_t>(__ pc_offset());
DCHECK_EQ(pc, __ pc_offset());
protected_instructions_.emplace_back(
- trap_handler::ProtectedInstructionData{ool.pc, pc});
+ trap_handler::ProtectedInstructionData{ool->pc, pc});
}
if (!env_->runtime_exception_support) {
@@ -419,16 +441,16 @@ class LiftoffCompiler {
return;
}
- if (!ool.regs_to_save.is_empty()) __ PushRegisters(ool.regs_to_save);
+ if (!ool->regs_to_save.is_empty()) __ PushRegisters(ool->regs_to_save);
source_position_table_builder_.AddPosition(
- __ pc_offset(), SourcePosition(ool.position), false);
- __ CallRuntimeStub(ool.stub);
+ __ pc_offset(), SourcePosition(ool->position), false);
+ __ CallRuntimeStub(ool->stub);
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
- DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
- if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
+ DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check);
+ if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save);
if (is_stack_check) {
- __ emit_jump(ool.continuation.get());
+ __ emit_jump(ool->continuation.get());
} else {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
@@ -437,7 +459,7 @@ class LiftoffCompiler {
void FinishFunction(FullDecoder* decoder) {
if (DidAssemblerBailout(decoder)) return;
for (OutOfLineCode& ool : out_of_line_code_) {
- GenerateOutOfLineCode(ool);
+ GenerateOutOfLineCode(&ool);
}
__ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
__ GetTotalFrameSlotCount());
@@ -449,7 +471,7 @@ class LiftoffCompiler {
}
void OnFirstError(FullDecoder* decoder) {
- ok_ = false;
+ if (!did_bailout()) bailout_reason_ = kDecodeError;
UnuseLabels(decoder);
asm_.AbortCompilation();
}
@@ -481,19 +503,20 @@ class LiftoffCompiler {
}
void Try(FullDecoder* decoder, Control* block) {
- unsupported(decoder, "try");
+ unsupported(decoder, kExceptionHandling, "try");
}
void Catch(FullDecoder* decoder, Control* block, Value* exception) {
- unsupported(decoder, "catch");
+ unsupported(decoder, kExceptionHandling, "catch");
}
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
- if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1)
- return unsupported(decoder, "multi-value if");
+ if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1) {
+ return unsupported(decoder, kMultiValue, "multi-value if");
+ }
// Allocate the else state.
if_block->else_state = base::make_unique<ElseState>();
@@ -773,8 +796,23 @@ class LiftoffCompiler {
__ emit_i64_eqz(dst.gp(), src);
});
break;
+ case WasmOpcode::kExprI64Clz:
+ case WasmOpcode::kExprI64Ctz:
+ case WasmOpcode::kExprI64Popcnt:
+ return unsupported(decoder, kComplexOperation,
+ WasmOpcodes::OpcodeName(opcode));
+ case WasmOpcode::kExprI32SConvertSatF32:
+ case WasmOpcode::kExprI32UConvertSatF32:
+ case WasmOpcode::kExprI32SConvertSatF64:
+ case WasmOpcode::kExprI32UConvertSatF64:
+ case WasmOpcode::kExprI64SConvertSatF32:
+ case WasmOpcode::kExprI64UConvertSatF32:
+ case WasmOpcode::kExprI64SConvertSatF64:
+ case WasmOpcode::kExprI64UConvertSatF64:
+ return unsupported(decoder, kNonTrappingFloatToInt,
+ WasmOpcodes::OpcodeName(opcode));
default:
- return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
+ UNREACHABLE();
}
#undef CASE_I32_UNOP
#undef CASE_I32_SIGN_EXTENSION
@@ -1104,8 +1142,12 @@ class LiftoffCompiler {
}
});
break;
+ case WasmOpcode::kExprI64Rol:
+ case WasmOpcode::kExprI64Ror:
+ return unsupported(decoder, kComplexOperation,
+ WasmOpcodes::OpcodeName(opcode));
default:
- return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
+ UNREACHABLE();
}
#undef CASE_I32_BINOP
#undef CASE_I32_BINOPI
@@ -1153,11 +1195,11 @@ class LiftoffCompiler {
}
void RefNull(FullDecoder* decoder, Value* result) {
- unsupported(decoder, "ref_null");
+ unsupported(decoder, kAnyRef, "ref_null");
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
- unsupported(decoder, "func");
+ unsupported(decoder, kAnyRef, "func");
}
void Drop(FullDecoder* decoder, const Value& value) {
@@ -1169,7 +1211,9 @@ class LiftoffCompiler {
void ReturnImpl(FullDecoder* decoder) {
size_t num_returns = decoder->sig_->return_count();
- if (num_returns > 1) return unsupported(decoder, "multi-return");
+ if (num_returns > 1) {
+ return unsupported(decoder, kMultiValue, "multi-return");
+ }
if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
@@ -1201,24 +1245,24 @@ class LiftoffCompiler {
}
}
- void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
+ void SetLocalFromStackSlot(LiftoffAssembler::VarState* dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
- ValueType type = dst_slot.type();
- if (dst_slot.is_reg()) {
- LiftoffRegister slot_reg = dst_slot.reg();
+ ValueType type = dst_slot->type();
+ if (dst_slot->is_reg()) {
+ LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) {
- __ Fill(dst_slot.reg(), state.stack_height() - 1, type);
+ __ Fill(dst_slot->reg(), state.stack_height() - 1, type);
return;
}
state.dec_used(slot_reg);
- dst_slot.MakeStack();
+ dst_slot->MakeStack();
}
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
__ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
- dst_slot = LiftoffAssembler::VarState(type, dst_reg);
+ *dst_slot = LiftoffAssembler::VarState(type, dst_reg);
__ cache_state()->inc_used(dst_reg);
}
@@ -1237,7 +1281,7 @@ class LiftoffCompiler {
target_slot = source_slot;
break;
case kStack:
- SetLocalFromStackSlot(target_slot, local_index);
+ SetLocalFromStackSlot(&target_slot, local_index);
break;
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
@@ -1254,12 +1298,12 @@ class LiftoffCompiler {
}
Register GetGlobalBaseAndOffset(const WasmGlobal* global,
- LiftoffRegList& pinned, uint32_t* offset) {
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
+ LiftoffRegList* pinned, uint32_t* offset) {
+ Register addr = pinned->set(__ GetUnusedRegister(kGpReg)).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
__ Load(LiftoffRegister(addr), addr, no_reg,
- global->index * sizeof(Address), kPointerLoadType, pinned);
+ global->index * sizeof(Address), kPointerLoadType, *pinned);
*offset = 0;
} else {
LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize);
@@ -1275,7 +1319,7 @@ class LiftoffCompiler {
return;
LiftoffRegList pinned;
uint32_t offset = 0;
- Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
+ Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
LoadType type = LoadType::ForValueType(global->type);
@@ -1290,20 +1334,20 @@ class LiftoffCompiler {
return;
LiftoffRegList pinned;
uint32_t offset = 0;
- Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
+ Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
StoreType type = StoreType::ForValueType(global->type);
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}
- void GetTable(FullDecoder* decoder, const Value& index, Value* result,
- TableIndexImmediate<validate>& imm) {
- unsupported(decoder, "table_get");
+ void TableGet(FullDecoder* decoder, const Value& index, Value* result,
+ const TableIndexImmediate<validate>& imm) {
+ unsupported(decoder, kAnyRef, "table_get");
}
- void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
- TableIndexImmediate<validate>& imm) {
- unsupported(decoder, "table_set");
+ void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
+ const TableIndexImmediate<validate>& imm) {
+ unsupported(decoder, kAnyRef, "table_set");
}
void Unreachable(FullDecoder* decoder) {
@@ -1370,8 +1414,8 @@ class LiftoffCompiler {
// Generate a branch table case, potentially reusing previously generated
// stack transfer code.
void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
- std::map<uint32_t, MovableLabel>& br_targets) {
- MovableLabel& label = br_targets[br_depth];
+ std::map<uint32_t, MovableLabel>* br_targets) {
+ MovableLabel& label = (*br_targets)[br_depth];
if (label.get()->is_bound()) {
__ jmp(label.get());
} else {
@@ -1384,13 +1428,13 @@ class LiftoffCompiler {
// TODO(wasm): Generate a real branch table (like TF TableSwitch).
void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
LiftoffRegister value, uint32_t min, uint32_t max,
- BranchTableIterator<validate>& table_iterator,
- std::map<uint32_t, MovableLabel>& br_targets) {
+ BranchTableIterator<validate>* table_iterator,
+ std::map<uint32_t, MovableLabel>* br_targets) {
DCHECK_LT(min, max);
// Check base case.
if (max == min + 1) {
- DCHECK_EQ(min, table_iterator.cur_index());
- GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ DCHECK_EQ(min, table_iterator->cur_index());
+ GenerateBrCase(decoder, table_iterator->next(), br_targets);
return;
}
@@ -1422,14 +1466,14 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
value.gp(), tmp.gp());
- GenerateBrTable(decoder, tmp, value, 0, imm.table_count, table_iterator,
- br_targets);
+ GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator,
+ &br_targets);
__ bind(&case_default);
}
// Generate the default case.
- GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ GenerateBrCase(decoder, table_iterator.next(), &br_targets);
DCHECK(!table_iterator.has_next());
}
@@ -1593,7 +1637,7 @@ class LiftoffCompiler {
}
Register AddMemoryMasking(Register index, uint32_t* offset,
- LiftoffRegList& pinned) {
+ LiftoffRegList* pinned) {
if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
return index;
}
@@ -1601,11 +1645,11 @@ class LiftoffCompiler {
// Make sure that we can overwrite {index}.
if (__ cache_state()->is_used(LiftoffRegister(index))) {
Register old_index = index;
- pinned.clear(LiftoffRegister(old_index));
- index = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ pinned->clear(LiftoffRegister(old_index));
+ index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
if (index != old_index) __ Move(index, old_index, kWasmI32);
}
- Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
+ Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
__ emit_ptrsize_add(index, index, *offset);
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_ptrsize_and(index, index, tmp);
@@ -1625,7 +1669,7 @@ class LiftoffCompiler {
return;
}
uint32_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, pinned);
+ index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("Load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -1659,7 +1703,7 @@ class LiftoffCompiler {
return;
}
uint32_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, pinned);
+ index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("Store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -1720,12 +1764,14 @@ class LiftoffCompiler {
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
- if (imm.sig->return_count() > 1)
- return unsupported(decoder, "multi-return");
+ if (imm.sig->return_count() > 1) {
+ return unsupported(decoder, kMultiValue, "multi-return");
+ }
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
- "return"))
+ "return")) {
return;
+ }
auto call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
@@ -1783,10 +1829,10 @@ class LiftoffCompiler {
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1) {
- return unsupported(decoder, "multi-return");
+ return unsupported(decoder, kMultiValue, "multi-return");
}
if (imm.table_index != 0) {
- return unsupported(decoder, "table index != 0");
+ return unsupported(decoder, kAnyRef, "table index != 0");
}
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
@@ -1918,96 +1964,99 @@ class LiftoffCompiler {
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
- unsupported(decoder, "return_call");
+ unsupported(decoder, kTailCall, "return_call");
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
- unsupported(decoder, "return_call_indirect");
+ unsupported(decoder, kTailCall, "return_call_indirect");
}
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate>& imm,
const Vector<Value> inputs, Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdShiftImmediate<validate>& imm, const Value& input,
Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void Simd8x16ShuffleOp(FullDecoder* decoder,
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
const Vector<Value>& args) {
- unsupported(decoder, "throw");
+ unsupported(decoder, kExceptionHandling, "throw");
}
void Rethrow(FullDecoder* decoder, const Value& exception) {
- unsupported(decoder, "rethrow");
+ unsupported(decoder, kExceptionHandling, "rethrow");
}
void BrOnException(FullDecoder* decoder, const Value& exception,
const ExceptionIndexImmediate<validate>& imm,
uint32_t depth, Vector<Value> values) {
- unsupported(decoder, "br_on_exn");
+ unsupported(decoder, kExceptionHandling, "br_on_exn");
}
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
- unsupported(decoder, "atomicop");
+ unsupported(decoder, kAtomics, "atomicop");
+ }
+ void AtomicFence(FullDecoder* decoder) {
+ unsupported(decoder, kAtomics, "atomic.fence");
}
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
- unsupported(decoder, "memory.init");
+ unsupported(decoder, kBulkMemory, "memory.init");
}
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
- unsupported(decoder, "data.drop");
+ unsupported(decoder, kBulkMemory, "data.drop");
}
void MemoryCopy(FullDecoder* decoder,
const MemoryCopyImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
- unsupported(decoder, "memory.copy");
+ unsupported(decoder, kBulkMemory, "memory.copy");
}
void MemoryFill(FullDecoder* decoder,
const MemoryIndexImmediate<validate>& imm, const Value& dst,
const Value& value, const Value& size) {
- unsupported(decoder, "memory.fill");
+ unsupported(decoder, kBulkMemory, "memory.fill");
}
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
Vector<Value> args) {
- unsupported(decoder, "table.init");
+ unsupported(decoder, kBulkMemory, "table.init");
}
void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
- unsupported(decoder, "elem.drop");
+ unsupported(decoder, kBulkMemory, "elem.drop");
}
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
- unsupported(decoder, "table.copy");
+ unsupported(decoder, kBulkMemory, "table.copy");
}
void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
- Value& value, Value& delta, Value* result) {
- unsupported(decoder, "table.grow");
+ const Value& value, const Value& delta, Value* result) {
+ unsupported(decoder, kAnyRef, "table.grow");
}
void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value* result) {
- unsupported(decoder, "table.size");
+ unsupported(decoder, kAnyRef, "table.size");
}
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
- Value& start, Value& value, Value& count) {
- unsupported(decoder, "table.fill");
+ const Value& start, const Value& value, const Value& count) {
+ unsupported(decoder, kAnyRef, "table.fill");
}
private:
LiftoffAssembler asm_;
compiler::CallDescriptor* const descriptor_;
CompilationEnv* const env_;
- bool ok_ = true;
+ LiftoffBailoutReason bailout_reason_ = kSuccess;
std::vector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder source_position_table_builder_;
std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
@@ -2066,11 +2115,17 @@ WasmCompilationResult ExecuteLiftoffCompilation(AccountingAllocator* allocator,
decoder.Decode();
liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
- if (decoder.failed()) {
- compiler->OnFirstError(&decoder);
- return WasmCompilationResult{};
- }
- if (!compiler->ok()) {
+ if (decoder.failed()) compiler->OnFirstError(&decoder);
+
+ // Check that the histogram for the bailout reasons has the correct size.
+ DCHECK_EQ(0, counters->liftoff_bailout_reasons()->min());
+ DCHECK_EQ(kNumBailoutReasons - 1, counters->liftoff_bailout_reasons()->max());
+ DCHECK_EQ(kNumBailoutReasons,
+ counters->liftoff_bailout_reasons()->num_buckets());
+ // Register the bailout reason (can also be {kSuccess}).
+ counters->liftoff_bailout_reasons()->AddSample(
+ static_cast<int>(compiler->bailout_reason()));
+ if (compiler->did_bailout()) {
// Liftoff compilation failed.
counters->liftoff_unsupported_functions()->Increment();
return WasmCompilationResult{};
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index f310b9a54b..d40b92bef4 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -19,6 +19,38 @@ struct CompilationEnv;
struct FunctionBody;
struct WasmFeatures;
+// Note: If this list changes, also the histogram "V8.LiftoffBailoutReasons"
+// on the chromium side needs to be updated.
+// Deprecating entries is always fine. Repurposing works if you don't care about
+// temporary mix-ups. Increasing the number of reasons {kNumBailoutReasons} is
+// more tricky, and might require introducing a new (updated) histogram.
+enum LiftoffBailoutReason : int8_t {
+ // Nothing actually failed.
+ kSuccess = 0,
+ // Compilation failed, but not because of Liftoff.
+ kDecodeError = 1,
+ // Liftoff is not implemented on that architecture.
+ kUnsupportedArchitecture = 2,
+ // More complex code would be needed because a CPU feature is not present.
+ kMissingCPUFeature = 3,
+ // Liftoff does not implement a complex (and rare) instruction.
+ kComplexOperation = 4,
+ // Unimplemented proposals:
+ kSimd = 5,
+ kAnyRef = 6,
+ kExceptionHandling = 7,
+ kMultiValue = 8,
+ kTailCall = 9,
+ kAtomics = 10,
+ kBulkMemory = 11,
+ kNonTrappingFloatToInt = 12,
+ // A little gap, for forward compatibility.
+ // Any other reason (use rarely; introduce new reasons if this spikes).
+ kOtherReason = 20,
+ // Marker:
+ kNumBailoutReasons
+};
+
WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
Counters*, WasmFeatures* detected_features);
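A minimal, self-contained model of the bailout flow this enum supports: the backend records the first (reason, detail) pair, and the compiler driver later samples the numeric reason into the V8.LiftoffBailoutReasons histogram. The types and names below are simplified stand-ins, not the real V8 classes:

#include <cstdint>
#include <cstdio>

enum SketchReason : int8_t { kOk = 0, kMissingFeature = 3, kNumSketchReasons };

struct SketchAssembler {
  SketchReason reason = kOk;
  const char* detail = nullptr;
  void bailout(SketchReason r, const char* d) {
    if (reason != kOk) return;  // Keep the first reason, as in the patch.
    reason = r;
    detail = d;
  }
};

int main() {
  SketchAssembler assembler;
  assembler.bailout(kMissingFeature, "i32_divs");  // e.g. no SUDIV on ARM
  // The driver samples the numeric reason whether or not compilation bailed.
  std::printf("histogram sample %d (%s)\n", static_cast<int>(assembler.reason),
              assembler.detail ? assembler.detail : "success");
}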
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 5be769685c..e82ffe8f67 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("mips " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -854,7 +852,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f32_copysign");
+ bailout(kComplexOperation, "f32_copysign");
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -881,7 +879,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f64_copysign");
+ bailout(kComplexOperation, "f64_copysign");
}
#define FP_BINOP(name, instruction) \
@@ -1026,10 +1024,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap);
return true;
- } else {
- BAILOUT("emit_type_conversion kExprI32SConvertF64");
- return true;
}
+ bailout(kUnsupportedArchitecture, "kExprI32SConvertF64");
+ return true;
}
case kExprI32UConvertF64: {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
@@ -1049,10 +1046,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap);
return true;
- } else {
- BAILOUT("emit_type_conversion kExprI32UConvertF64");
- return true;
}
+ bailout(kUnsupportedArchitecture, "kExprI32UConvertF64");
+ return true;
}
case kExprI32ReinterpretF32:
mfc1(dst.gp(), src.fp());
@@ -1116,26 +1112,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kComplexOperation, "i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kComplexOperation, "i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kComplexOperation, "i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kComplexOperation, "i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kComplexOperation, "i64_signextend_i32");
}
void LiftoffAssembler::emit_jump(Label* label) {
@@ -1239,29 +1235,29 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
namespace liftoff {
-inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
- Condition condition) {
+inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
+ bool* predicate) {
switch (condition) {
case kEqual:
- predicate = true;
+ *predicate = true;
return EQ;
case kUnequal:
- predicate = false;
+ *predicate = false;
return EQ;
case kUnsignedLessThan:
- predicate = true;
+ *predicate = true;
return OLT;
case kUnsignedGreaterEqual:
- predicate = false;
+ *predicate = false;
return OLT;
case kUnsignedLessEqual:
- predicate = true;
+ *predicate = true;
return OLE;
case kUnsignedGreaterThan:
- predicate = false;
+ *predicate = false;
return OLE;
default:
- predicate = true;
+ *predicate = true;
break;
}
UNREACHABLE();
@@ -1287,7 +1283,7 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF32(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1316,7 +1312,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF64(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1511,6 +1507,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
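Besides swapping BAILOUT for bailout(kComplexOperation, ...), the MIPS hunk above (and the identical MIPS64 hunk below) changes ConditionToConditionCmpFPU from a non-const reference out-parameter to a pointer, so every call site now writes &predicate and the mutation is visible to the reader; this is the same cpplint runtime/references rule that other hunks in this commit suppress with NOLINT comments. A small standalone sketch of that out-parameter style, with invented names rather than V8 code:

    #include <cassert>

    // Pointer out-parameter: the caller must write &is_negative, which makes
    // it obvious at the call site that the argument is modified (a reference
    // parameter would hide that).
    int Magnitude(int value, bool* is_negative) {
      *is_negative = value < 0;
      return value < 0 ? -value : value;
    }

    int main() {
      bool is_negative;
      int magnitude = Magnitude(-5, &is_negative);  // mutation is visible here
      assert(is_negative);
      assert(magnitude == 5);
      return 0;
    }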
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 1da72cb9b8..9c87dca733 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("mips64 " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -742,7 +740,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f32_copysign");
+ bailout(kComplexOperation, "f32_copysign");
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -769,7 +767,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f64_copysign");
+ bailout(kComplexOperation, "f64_copysign");
}
#define FP_BINOP(name, instruction) \
@@ -1010,26 +1008,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kComplexOperation, "i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kComplexOperation, "i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kComplexOperation, "i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kComplexOperation, "i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kComplexOperation, "i64_signextend_i32");
}
void LiftoffAssembler::emit_jump(Label* label) {
@@ -1096,29 +1094,29 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
namespace liftoff {
-inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
- Condition condition) {
+inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
+ bool* predicate) {
switch (condition) {
case kEqual:
- predicate = true;
+ *predicate = true;
return EQ;
case kUnequal:
- predicate = false;
+ *predicate = false;
return EQ;
case kUnsignedLessThan:
- predicate = true;
+ *predicate = true;
return OLT;
case kUnsignedGreaterEqual:
- predicate = false;
+ *predicate = false;
return OLT;
case kUnsignedLessEqual:
- predicate = true;
+ *predicate = true;
return OLE;
case kUnsignedGreaterThan:
- predicate = false;
+ *predicate = false;
return OLE;
default:
- predicate = true;
+ *predicate = true;
break;
}
UNREACHABLE();
@@ -1144,7 +1142,7 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF32(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1173,7 +1171,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF64(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1351,6 +1349,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 577df835e8..a690a1c090 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -7,20 +7,19 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("ppc " reason)
namespace v8 {
namespace internal {
namespace wasm {
int LiftoffAssembler::PrepareStackFrame() {
- BAILOUT("PrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PrepareStackFrame");
return 0;
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
- BAILOUT("PatchPrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
}
void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
@@ -29,136 +28,136 @@ void LiftoffAssembler::AbortCompilation() { FinishCode(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- BAILOUT("LoadConstant");
+ bailout(kUnsupportedArchitecture, "LoadConstant");
}
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
- BAILOUT("LoadFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
uint32_t offset) {
- BAILOUT("LoadTaggedPointerFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
void LiftoffAssembler::SpillInstance(Register instance) {
- BAILOUT("SpillInstance");
+ bailout(kUnsupportedArchitecture, "SpillInstance");
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- BAILOUT("FillInstanceInto");
+ bailout(kUnsupportedArchitecture, "FillInstanceInto");
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
LiftoffRegList pinned) {
- BAILOUT("LoadTaggedPointer");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointer");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- BAILOUT("Load");
+ bailout(kUnsupportedArchitecture, "Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- BAILOUT("Store");
+ bailout(kUnsupportedArchitecture, "Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- BAILOUT("LoadCallerFrameSlot");
+ bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
- BAILOUT("MoveStackValue");
+ bailout(kUnsupportedArchitecture, "MoveStackValue");
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- BAILOUT("Move Register");
+ bailout(kUnsupportedArchitecture, "Move Register");
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
- BAILOUT("Move DoubleRegister");
+ bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
- BAILOUT("Spill register");
+ bailout(kUnsupportedArchitecture, "Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- BAILOUT("Spill value");
+ bailout(kUnsupportedArchitecture, "Spill value");
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
- BAILOUT("Fill");
+ bailout(kUnsupportedArchitecture, "Fill");
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
- BAILOUT("FillI64Half");
+ bailout(kUnsupportedArchitecture, "FillI64Half");
}
#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("i32 binop:: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop:: " #name); \
}
#define UNIMPLEMENTED_I32_BINOP_I(name) \
UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
int32_t imm) { \
- BAILOUT("i32 binop_i: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- BAILOUT("i64 binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP_I(name) \
UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
int32_t imm) { \
- BAILOUT("i64_i binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64_i binop: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "gp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp binop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_I32_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i32 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
}
#define UNIMPLEMENTED_I64_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i64 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
}
UNIMPLEMENTED_I32_BINOP_I(i32_add)
@@ -227,65 +226,65 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i32_divs");
+ bailout(kUnsupportedArchitecture, "i32_divs");
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_divu");
+ bailout(kUnsupportedArchitecture, "i32_divu");
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_rems");
+ bailout(kUnsupportedArchitecture, "i32_rems");
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_remu");
+ bailout(kUnsupportedArchitecture, "i32_remu");
}
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
- BAILOUT("i32_shr");
+ bailout(kUnsupportedArchitecture, "i32_shr");
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i64_divs");
+ bailout(kUnsupportedArchitecture, "i64_divs");
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_divu");
+ bailout(kUnsupportedArchitecture, "i64_divu");
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_rems");
+ bailout(kUnsupportedArchitecture, "i64_rems");
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_remu");
+ bailout(kUnsupportedArchitecture, "i64_remu");
return true;
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister lhs,
int amount) {
- BAILOUT("i64_shr");
+ bailout(kUnsupportedArchitecture, "i64_shr");
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_PPC64
- BAILOUT("emit_i32_to_intptr");
+ bailout(kUnsupportedArchitecture, "emit_i32_to_intptr");
#else
// This is a nop on ppc32.
#endif
@@ -294,96 +293,100 @@ void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- BAILOUT("emit_type_conversion");
+ bailout(kUnsupportedArchitecture, "emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i32");
}
-void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
-void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
- BAILOUT("emit_cond_jump");
+ bailout(kUnsupportedArchitecture, "emit_cond_jump");
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- BAILOUT("emit_i32_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i32_eqz");
}
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- BAILOUT("emit_i32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i32_set_cond");
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- BAILOUT("emit_i64_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i64_eqz");
}
void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- BAILOUT("emit_i64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i64_set_cond");
}
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f32_set_cond");
}
void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- BAILOUT("StackCheck");
+ bailout(kUnsupportedArchitecture, "StackCheck");
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- BAILOUT("CallTrapCallbackForTesting");
+ bailout(kUnsupportedArchitecture, "CallTrapCallbackForTesting");
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- BAILOUT("AssertUnreachable");
+ bailout(kUnsupportedArchitecture, "AssertUnreachable");
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- BAILOUT("PushRegisters");
+ bailout(kUnsupportedArchitecture, "PushRegisters");
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- BAILOUT("PopRegisters");
+ bailout(kUnsupportedArchitecture, "PopRegisters");
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- BAILOUT("DropStackSlotsAndRet");
+ bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
@@ -391,33 +394,33 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
- BAILOUT("CallC");
+ bailout(kUnsupportedArchitecture, "CallC");
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- BAILOUT("CallNativeWasmCode");
+ bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- BAILOUT("CallIndirect");
+ bailout(kUnsupportedArchitecture, "CallIndirect");
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- BAILOUT("CallRuntimeStub");
+ bailout(kUnsupportedArchitecture, "CallRuntimeStub");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- BAILOUT("AllocateStackSlot");
+ bailout(kUnsupportedArchitecture, "AllocateStackSlot");
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- BAILOUT("DeallocateStackSlot");
+ bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
void LiftoffStackSlots::Construct() {
- asm_->BAILOUT("LiftoffStackSlots::Construct");
+ asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
} // namespace wasm
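The ppc port above (and the s390 port below) is still entirely stubbed out: every LiftoffAssembler method records bailout(kUnsupportedArchitecture, ...), and the UNIMPLEMENTED_* macros stamp out one such stub per opcode in a family. A rough, self-contained illustration of that macro technique follows; ToyAssembler and TOY_UNIMPLEMENTED_BINOP are invented names, not the real V8 macros.

    #include <cstdio>

    // A stand-in assembler that only remembers why it had to give up.
    struct ToyAssembler {
      const char* bailout_reason = nullptr;
      void bailout(const char* detail) {
        if (bailout_reason == nullptr) bailout_reason = detail;
      }

    // One macro stamps out a family of identical "not implemented" stubs.
    #define TOY_UNIMPLEMENTED_BINOP(name)               \
      void emit_##name(int dst, int lhs, int rhs) {     \
        bailout("binop: " #name);                       \
      }

      TOY_UNIMPLEMENTED_BINOP(i32_add)
      TOY_UNIMPLEMENTED_BINOP(i32_sub)
    #undef TOY_UNIMPLEMENTED_BINOP
    };

    int main() {
      ToyAssembler assembler;
      assembler.emit_i32_add(0, 1, 2);  // expands from the macro above
      std::printf("bailed out: %s\n", assembler.bailout_reason);
      return 0;
    }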
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 1e01bec407..d17c7dada1 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -7,20 +7,19 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("s390 " reason)
namespace v8 {
namespace internal {
namespace wasm {
int LiftoffAssembler::PrepareStackFrame() {
- BAILOUT("PrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PrepareStackFrame");
return 0;
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
- BAILOUT("PatchPrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
}
void LiftoffAssembler::FinishCode() {}
@@ -29,136 +28,136 @@ void LiftoffAssembler::AbortCompilation() {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- BAILOUT("LoadConstant");
+ bailout(kUnsupportedArchitecture, "LoadConstant");
}
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
- BAILOUT("LoadFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
uint32_t offset) {
- BAILOUT("LoadTaggedPointerFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
void LiftoffAssembler::SpillInstance(Register instance) {
- BAILOUT("SpillInstance");
+ bailout(kUnsupportedArchitecture, "SpillInstance");
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- BAILOUT("FillInstanceInto");
+ bailout(kUnsupportedArchitecture, "FillInstanceInto");
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
LiftoffRegList pinned) {
- BAILOUT("LoadTaggedPointer");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointer");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- BAILOUT("Load");
+ bailout(kUnsupportedArchitecture, "Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- BAILOUT("Store");
+ bailout(kUnsupportedArchitecture, "Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- BAILOUT("LoadCallerFrameSlot");
+ bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
- BAILOUT("MoveStackValue");
+ bailout(kUnsupportedArchitecture, "MoveStackValue");
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- BAILOUT("Move Register");
+ bailout(kUnsupportedArchitecture, "Move Register");
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
- BAILOUT("Move DoubleRegister");
+ bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
- BAILOUT("Spill register");
+ bailout(kUnsupportedArchitecture, "Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- BAILOUT("Spill value");
+ bailout(kUnsupportedArchitecture, "Spill value");
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
- BAILOUT("Fill");
+ bailout(kUnsupportedArchitecture, "Fill");
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
- BAILOUT("FillI64Half");
+ bailout(kUnsupportedArchitecture, "FillI64Half");
}
#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("i32 binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop: " #name); \
}
#define UNIMPLEMENTED_I32_BINOP_I(name) \
UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
int32_t imm) { \
- BAILOUT("i32 binop_i: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- BAILOUT("i64 binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP_I(name) \
UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
int32_t imm) { \
- BAILOUT("i64 binop_i: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 binop_i: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "gp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp binop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_I32_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i32 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
}
#define UNIMPLEMENTED_I64_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i64 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
}
UNIMPLEMENTED_I32_BINOP_I(i32_add)
@@ -227,65 +226,65 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i32_divs");
+ bailout(kUnsupportedArchitecture, "i32_divs");
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_divu");
+ bailout(kUnsupportedArchitecture, "i32_divu");
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_rems");
+ bailout(kUnsupportedArchitecture, "i32_rems");
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_remu");
+ bailout(kUnsupportedArchitecture, "i32_remu");
}
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
- BAILOUT("i32_shr");
+ bailout(kUnsupportedArchitecture, "i32_shr");
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i64_divs");
+ bailout(kUnsupportedArchitecture, "i64_divs");
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_divu");
+ bailout(kUnsupportedArchitecture, "i64_divu");
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_rems");
+ bailout(kUnsupportedArchitecture, "i64_rems");
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_remu");
+ bailout(kUnsupportedArchitecture, "i64_remu");
return true;
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister lhs,
int amount) {
- BAILOUT("i64_shr");
+ bailout(kUnsupportedArchitecture, "i64_shr");
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
- BAILOUT("emit_i32_to_intptr");
+ bailout(kUnsupportedArchitecture, "emit_i32_to_intptr");
#else
// This is a nop on s390.
#endif
@@ -294,96 +293,100 @@ void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- BAILOUT("emit_type_conversion");
+ bailout(kUnsupportedArchitecture, "emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i32");
}
-void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
-void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
- BAILOUT("emit_cond_jump");
+ bailout(kUnsupportedArchitecture, "emit_cond_jump");
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- BAILOUT("emit_i32_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i32_eqz");
}
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- BAILOUT("emit_i32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i32_set_cond");
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- BAILOUT("emit_i64_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i64_eqz");
}
void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- BAILOUT("emit_i64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i64_set_cond");
}
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f32_set_cond");
}
void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- BAILOUT("StackCheck");
+ bailout(kUnsupportedArchitecture, "StackCheck");
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- BAILOUT("CallTrapCallbackForTesting");
+ bailout(kUnsupportedArchitecture, "CallTrapCallbackForTesting");
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- BAILOUT("AssertUnreachable");
+ bailout(kUnsupportedArchitecture, "AssertUnreachable");
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- BAILOUT("PushRegisters");
+ bailout(kUnsupportedArchitecture, "PushRegisters");
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- BAILOUT("PopRegisters");
+ bailout(kUnsupportedArchitecture, "PopRegisters");
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- BAILOUT("DropStackSlotsAndRet");
+ bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
@@ -391,33 +394,33 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
- BAILOUT("CallC");
+ bailout(kUnsupportedArchitecture, "CallC");
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- BAILOUT("CallNativeWasmCode");
+ bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- BAILOUT("CallIndirect");
+ bailout(kUnsupportedArchitecture, "CallIndirect");
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- BAILOUT("CallRuntimeStub");
+ bailout(kUnsupportedArchitecture, "CallRuntimeStub");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- BAILOUT("AllocateStackSlot");
+ bailout(kUnsupportedArchitecture, "AllocateStackSlot");
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- BAILOUT("DeallocateStackSlot");
+ bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
void LiftoffStackSlots::Construct() {
- asm_->BAILOUT("LiftoffStackSlots::Construct");
+ asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index cbff0d4da9..43637985d0 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -14,11 +14,11 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define REQUIRE_CPU_FEATURE(name, ...) \
- if (!CpuFeatures::IsSupported(name)) { \
- bailout("no " #name); \
- return __VA_ARGS__; \
- } \
+#define REQUIRE_CPU_FEATURE(name, ...) \
+ if (!CpuFeatures::IsSupported(name)) { \
+ bailout(kMissingCPUFeature, "no " #name); \
+ return __VA_ARGS__; \
+ } \
CpuFeatureScope feature(this, name);
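The reworked REQUIRE_CPU_FEATURE macro above reports kMissingCPUFeature, so a missing instruction-set extension on x64 is counted separately from the kUnsupportedArchitecture bucket used by the unported backends. The guard shape (check the feature, bail out and return early, otherwise continue under a feature scope) looks roughly like the sketch below; the names are invented and the CpuFeatureScope part is omitted.

    #include <cstdio>

    // Hypothetical stand-ins for CpuFeatures::IsSupported and bailout().
    bool FeatureSupported(const char* /* name */) { return false; }
    void Bailout(const char* bucket, const char* detail) {
      std::printf("bailout(%s): %s\n", bucket, detail);
    }

    // Same shape as REQUIRE_CPU_FEATURE: __VA_ARGS__ carries the value to
    // return from non-void emit functions when the feature is missing.
    #define REQUIRE_FEATURE(name, ...)               \
      if (!FeatureSupported(#name)) {                \
        Bailout("kMissingCPUFeature", "no " #name);  \
        return __VA_ARGS__;                          \
      }

    bool EmitTruncatingConversion() {
      REQUIRE_FEATURE(SSE4_1, true)  // bail out and return true if missing
      // ... actual code generation would follow here ...
      return true;
    }

    int main() {
      EmitTruncatingConversion();
      return 0;
    }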
namespace liftoff {
@@ -1260,7 +1260,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
- assm->bailout("no SSE4.1");
+ assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index e5c1fa4686..86bba189b8 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -28,577 +28,131 @@
#include "include/libplatform/libplatform.h"
#include "src/api/api-inl.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-instantiate.h"
+#include "src/wasm/wasm-arguments.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
-// BEGIN FILE wasm-bin.cc
-
namespace wasm {
-namespace bin {
-
-////////////////////////////////////////////////////////////////////////////////
-// Encoding
-
-void encode_header(char*& ptr) {
- std::memcpy(ptr,
- "\x00"
- "asm\x01\x00\x00\x00",
- 8);
- ptr += 8;
-}
-
-void encode_size32(char*& ptr, size_t n) {
- assert(n <= 0xffffffff);
- for (int i = 0; i < 5; ++i) {
- *ptr++ = (n & 0x7f) | (i == 4 ? 0x00 : 0x80);
- n = n >> 7;
- }
-}
-void encode_valtype(char*& ptr, const ValType* type) {
- switch (type->kind()) {
- case I32:
- *ptr++ = 0x7f;
- break;
- case I64:
- *ptr++ = 0x7e;
- break;
- case F32:
- *ptr++ = 0x7d;
- break;
- case F64:
- *ptr++ = 0x7c;
- break;
- case FUNCREF:
- *ptr++ = 0x70;
- break;
- case ANYREF:
- *ptr++ = 0x6f;
- break;
- default:
- UNREACHABLE();
- }
-}
-
-auto zero_size(const ValType* type) -> size_t {
- switch (type->kind()) {
- case I32:
- return 1;
- case I64:
- return 1;
- case F32:
- return 4;
- case F64:
- return 8;
- case FUNCREF:
- return 0;
- case ANYREF:
- return 0;
- default:
- UNREACHABLE();
- }
-}
-
-void encode_const_zero(char*& ptr, const ValType* type) {
- switch (type->kind()) {
- case I32:
- *ptr++ = 0x41;
- break;
- case I64:
- *ptr++ = 0x42;
- break;
- case F32:
- *ptr++ = 0x43;
- break;
- case F64:
- *ptr++ = 0x44;
- break;
- default:
- UNREACHABLE();
- }
- for (size_t i = 0; i < zero_size(type); ++i) *ptr++ = 0;
-}
-
-auto wrapper(const FuncType* type) -> vec<byte_t> {
- auto in_arity = type->params().size();
- auto out_arity = type->results().size();
- auto size = 39 + in_arity + out_arity;
- auto binary = vec<byte_t>::make_uninitialized(size);
- auto ptr = binary.get();
-
- encode_header(ptr);
-
- *ptr++ = i::wasm::kTypeSectionCode;
- encode_size32(ptr, 12 + in_arity + out_arity); // size
- *ptr++ = 1; // length
- *ptr++ = i::wasm::kWasmFunctionTypeCode;
- encode_size32(ptr, in_arity);
- for (size_t i = 0; i < in_arity; ++i) {
- encode_valtype(ptr, type->params()[i].get());
- }
- encode_size32(ptr, out_arity);
- for (size_t i = 0; i < out_arity; ++i) {
- encode_valtype(ptr, type->results()[i].get());
- }
-
- *ptr++ = i::wasm::kImportSectionCode;
- *ptr++ = 5; // size
- *ptr++ = 1; // length
- *ptr++ = 0; // module length
- *ptr++ = 0; // name length
- *ptr++ = i::wasm::kExternalFunction;
- *ptr++ = 0; // type index
-
- *ptr++ = i::wasm::kExportSectionCode;
- *ptr++ = 4; // size
- *ptr++ = 1; // length
- *ptr++ = 0; // name length
- *ptr++ = i::wasm::kExternalFunction;
- *ptr++ = 0; // func index
-
- assert(ptr - binary.get() == static_cast<ptrdiff_t>(size));
- return binary;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Decoding
-
-// Numbers
-
-auto u32(const byte_t*& pos) -> uint32_t {
- uint32_t n = 0;
- uint32_t shift = 0;
- byte_t b;
- do {
- b = *pos++;
- n += (b & 0x7f) << shift;
- shift += 7;
- } while ((b & 0x80) != 0);
- return n;
-}
+namespace {
-auto u64(const byte_t*& pos) -> uint64_t {
+auto ReadLebU64(const byte_t** pos) -> uint64_t {
uint64_t n = 0;
uint64_t shift = 0;
byte_t b;
do {
- b = *pos++;
+ b = **pos;
+ (*pos)++;
n += (b & 0x7f) << shift;
shift += 7;
} while ((b & 0x80) != 0);
return n;
}
-void u32_skip(const byte_t*& pos) { bin::u32(pos); }
-
-// Names
-
-auto name(const byte_t*& pos) -> Name {
- auto size = bin::u32(pos);
- auto start = pos;
- auto name = Name::make_uninitialized(size);
- std::memcpy(name.get(), start, size);
- pos += size;
- return name;
-}
-
-// Types
-
-auto valtype(const byte_t*& pos) -> own<wasm::ValType*> {
- switch (*pos++) {
- case i::wasm::kLocalI32:
- return ValType::make(I32);
- case i::wasm::kLocalI64:
- return ValType::make(I64);
- case i::wasm::kLocalF32:
- return ValType::make(F32);
- case i::wasm::kLocalF64:
- return ValType::make(F64);
- case i::wasm::kLocalAnyFunc:
- return ValType::make(FUNCREF);
- case i::wasm::kLocalAnyRef:
- return ValType::make(ANYREF);
+ValKind V8ValueTypeToWasm(i::wasm::ValueType v8_valtype) {
+ switch (v8_valtype) {
+ case i::wasm::kWasmI32:
+ return I32;
+ case i::wasm::kWasmI64:
+ return I64;
+ case i::wasm::kWasmF32:
+ return F32;
+ case i::wasm::kWasmF64:
+ return F64;
+ case i::wasm::kWasmFuncRef:
+ return FUNCREF;
+ case i::wasm::kWasmAnyRef:
+ return ANYREF;
default:
// TODO(wasm+): support new value types
UNREACHABLE();
}
- return {};
-}
-
-auto mutability(const byte_t*& pos) -> Mutability {
- return *pos++ ? VAR : CONST;
-}
-
-auto limits(const byte_t*& pos) -> Limits {
- auto tag = *pos++;
- auto min = bin::u32(pos);
- if ((tag & 0x01) == 0) {
- return Limits(min);
- } else {
- auto max = bin::u32(pos);
- return Limits(min, max);
- }
-}
-
-auto stacktype(const byte_t*& pos) -> vec<ValType*> {
- size_t size = bin::u32(pos);
- auto v = vec<ValType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) v[i] = bin::valtype(pos);
- return v;
-}
-
-auto functype(const byte_t*& pos) -> own<FuncType*> {
- assert(*pos == i::wasm::kWasmFunctionTypeCode);
- ++pos;
- auto params = bin::stacktype(pos);
- auto results = bin::stacktype(pos);
- return FuncType::make(std::move(params), std::move(results));
-}
-
-auto globaltype(const byte_t*& pos) -> own<GlobalType*> {
- auto content = bin::valtype(pos);
- auto mutability = bin::mutability(pos);
- return GlobalType::make(std::move(content), mutability);
-}
-
-auto tabletype(const byte_t*& pos) -> own<TableType*> {
- auto elem = bin::valtype(pos);
- auto limits = bin::limits(pos);
- return TableType::make(std::move(elem), limits);
}
-auto memorytype(const byte_t*& pos) -> own<MemoryType*> {
- auto limits = bin::limits(pos);
- return MemoryType::make(limits);
-}
-
-// Expressions
-
-void expr_skip(const byte_t*& pos) {
- switch (*pos++) {
- case i::wasm::kExprI32Const:
- case i::wasm::kExprI64Const:
- case i::wasm::kExprGetGlobal: {
- bin::u32_skip(pos);
- } break;
- case i::wasm::kExprF32Const: {
- pos += 4;
- } break;
- case i::wasm::kExprF64Const: {
- pos += 8;
- } break;
- default: {
- // TODO(wasm+): support new expression forms
+i::wasm::ValueType WasmValKindToV8(ValKind kind) {
+ switch (kind) {
+ case I32:
+ return i::wasm::kWasmI32;
+ case I64:
+ return i::wasm::kWasmI64;
+ case F32:
+ return i::wasm::kWasmF32;
+ case F64:
+ return i::wasm::kWasmF64;
+ case FUNCREF:
+ return i::wasm::kWasmFuncRef;
+ case ANYREF:
+ return i::wasm::kWasmAnyRef;
+ default:
+ // TODO(wasm+): support new value types
UNREACHABLE();
- }
}
- ++pos; // end
-}
-
-// Sections
-
-auto section(const vec<const byte_t>& binary, i::wasm::SectionCode sec)
- -> const byte_t* {
- const byte_t* end = binary.get() + binary.size();
- const byte_t* pos = binary.get() + 8; // skip header
- while (pos < end && *pos++ != sec) {
- auto size = bin::u32(pos);
- pos += size;
- }
- if (pos == end) return nullptr;
- bin::u32_skip(pos);
- return pos;
-}
-
-// Only for asserts/DCHECKs.
-auto section_end(const vec<const byte_t>& binary, i::wasm::SectionCode sec)
- -> const byte_t* {
- const byte_t* end = binary.get() + binary.size();
- const byte_t* pos = binary.get() + 8; // skip header
- while (pos < end && *pos != sec) {
- ++pos;
- auto size = bin::u32(pos);
- pos += size;
- }
- if (pos == end) return nullptr;
- ++pos;
- auto size = bin::u32(pos);
- return pos + size;
-}
-
-// Type section
-
-auto types(const vec<const byte_t>& binary) -> vec<FuncType*> {
- auto pos = bin::section(binary, i::wasm::kTypeSectionCode);
- if (pos == nullptr) return vec<FuncType*>::make();
- size_t size = bin::u32(pos);
- // TODO(wasm+): support new deftypes
- auto v = vec<FuncType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) {
- v[i] = bin::functype(pos);
- }
- assert(pos == bin::section_end(binary, i::wasm::kTypeSectionCode));
- return v;
-}
-
-// Import section
-
-auto imports(const vec<const byte_t>& binary, const vec<FuncType*>& types)
- -> vec<ImportType*> {
- auto pos = bin::section(binary, i::wasm::kImportSectionCode);
- if (pos == nullptr) return vec<ImportType*>::make();
- size_t size = bin::u32(pos);
- auto v = vec<ImportType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) {
- auto module = bin::name(pos);
- auto name = bin::name(pos);
- own<ExternType*> type;
- switch (*pos++) {
- case i::wasm::kExternalFunction:
- type = types[bin::u32(pos)]->copy();
- break;
- case i::wasm::kExternalTable:
- type = bin::tabletype(pos);
- break;
- case i::wasm::kExternalMemory:
- type = bin::memorytype(pos);
- break;
- case i::wasm::kExternalGlobal:
- type = bin::globaltype(pos);
- break;
- default:
- UNREACHABLE();
- }
- v[i] =
- ImportType::make(std::move(module), std::move(name), std::move(type));
- }
- assert(pos == bin::section_end(binary, i::wasm::kImportSectionCode));
- return v;
-}
-
-auto count(const vec<ImportType*>& imports, ExternKind kind) -> uint32_t {
- uint32_t n = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- if (imports[i]->type()->kind() == kind) ++n;
- }
- return n;
}
-// Function section
-
-auto funcs(const vec<const byte_t>& binary, const vec<ImportType*>& imports,
- const vec<FuncType*>& types) -> vec<FuncType*> {
- auto pos = bin::section(binary, i::wasm::kFunctionSectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v =
- vec<FuncType*>::make_uninitialized(size + count(imports, EXTERN_FUNC));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_FUNC) {
- v[j++] = et->func()->copy();
- }
- }
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = types[bin::u32(pos)]->copy();
- }
- assert(pos == bin::section_end(binary, i::wasm::kFunctionSectionCode));
- }
- return v;
+Name GetNameFromWireBytes(const i::wasm::WireBytesRef& ref,
+ const i::Vector<const uint8_t>& wire_bytes) {
+ DCHECK_LE(ref.offset(), wire_bytes.length());
+ DCHECK_LE(ref.end_offset(), wire_bytes.length());
+ Name name = Name::make_uninitialized(ref.length());
+ std::memcpy(name.get(), wire_bytes.begin() + ref.offset(), ref.length());
+ return name;
}
-// Global section
-
-auto globals(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
- -> vec<GlobalType*> {
- auto pos = bin::section(binary, i::wasm::kGlobalSectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v = vec<GlobalType*>::make_uninitialized(size +
- count(imports, EXTERN_GLOBAL));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_GLOBAL) {
- v[j++] = et->global()->copy();
- }
+own<FuncType*> FunctionSigToFuncType(const i::wasm::FunctionSig* sig) {
+ size_t param_count = sig->parameter_count();
+ vec<ValType*> params = vec<ValType*>::make_uninitialized(param_count);
+ for (size_t i = 0; i < param_count; i++) {
+ params[i] = ValType::make(V8ValueTypeToWasm(sig->GetParam(i)));
}
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = bin::globaltype(pos);
- expr_skip(pos);
- }
- assert(pos == bin::section_end(binary, i::wasm::kGlobalSectionCode));
+ size_t return_count = sig->return_count();
+ vec<ValType*> results = vec<ValType*>::make_uninitialized(return_count);
+ for (size_t i = 0; i < return_count; i++) {
+ results[i] = ValType::make(V8ValueTypeToWasm(sig->GetReturn(i)));
}
- return v;
+ return FuncType::make(std::move(params), std::move(results));
}
-// Table section
-
-auto tables(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
- -> vec<TableType*> {
- auto pos = bin::section(binary, i::wasm::kTableSectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v =
- vec<TableType*>::make_uninitialized(size + count(imports, EXTERN_TABLE));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_TABLE) {
- v[j++] = et->table()->copy();
- }
- }
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = bin::tabletype(pos);
+own<ExternType*> GetImportExportType(const i::wasm::WasmModule* module,
+ const i::wasm::ImportExportKindCode kind,
+ const uint32_t index) {
+ switch (kind) {
+ case i::wasm::kExternalFunction: {
+ return FunctionSigToFuncType(module->functions[index].sig);
}
- assert(pos == bin::section_end(binary, i::wasm::kTableSectionCode));
- }
- return v;
-}
-
-// Memory section
-
-auto memories(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
- -> vec<MemoryType*> {
- auto pos = bin::section(binary, i::wasm::kMemorySectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v = vec<MemoryType*>::make_uninitialized(size +
- count(imports, EXTERN_MEMORY));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_MEMORY) {
- v[j++] = et->memory()->copy();
+ case i::wasm::kExternalTable: {
+ const i::wasm::WasmTable& table = module->tables[index];
+ own<ValType*> elem = ValType::make(V8ValueTypeToWasm(table.type));
+ Limits limits(table.initial_size,
+ table.has_maximum_size ? table.maximum_size : -1);
+ return TableType::make(std::move(elem), limits);
}
- }
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = bin::memorytype(pos);
+ case i::wasm::kExternalMemory: {
+ DCHECK(module->has_memory);
+ Limits limits(module->initial_pages,
+ module->has_maximum_pages ? module->maximum_pages : -1);
+ return MemoryType::make(limits);
}
- assert(pos == bin::section_end(binary, i::wasm::kMemorySectionCode));
- }
- return v;
-}
-
-// Export section
-
-auto exports(const vec<const byte_t>& binary, const vec<FuncType*>& funcs,
- const vec<GlobalType*>& globals, const vec<TableType*>& tables,
- const vec<MemoryType*>& memories) -> vec<ExportType*> {
- auto pos = bin::section(binary, i::wasm::kExportSectionCode);
- if (pos == nullptr) return vec<ExportType*>::make();
- size_t size = bin::u32(pos);
- auto exports = vec<ExportType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) {
- auto name = bin::name(pos);
- auto tag = *pos++;
- auto index = bin::u32(pos);
- own<ExternType*> type;
- switch (tag) {
- case i::wasm::kExternalFunction:
- type = funcs[index]->copy();
- break;
- case i::wasm::kExternalTable:
- type = tables[index]->copy();
- break;
- case i::wasm::kExternalMemory:
- type = memories[index]->copy();
- break;
- case i::wasm::kExternalGlobal:
- type = globals[index]->copy();
- break;
- default:
- UNREACHABLE();
+ case i::wasm::kExternalGlobal: {
+ const i::wasm::WasmGlobal& global = module->globals[index];
+ own<ValType*> content = ValType::make(V8ValueTypeToWasm(global.type));
+ Mutability mutability = global.mutability ? VAR : CONST;
+ return GlobalType::make(std::move(content), mutability);
}
- exports[i] = ExportType::make(std::move(name), std::move(type));
- }
- assert(pos == bin::section_end(binary, i::wasm::kExportSectionCode));
- return exports;
-}
-
-auto imports(const vec<const byte_t>& binary) -> vec<ImportType*> {
- return bin::imports(binary, bin::types(binary));
-}
-
-auto exports(const vec<const byte_t>& binary) -> vec<ExportType*> {
- auto types = bin::types(binary);
- auto imports = bin::imports(binary, types);
- auto funcs = bin::funcs(binary, imports, types);
- auto globals = bin::globals(binary, imports);
- auto tables = bin::tables(binary, imports);
- auto memories = bin::memories(binary, imports);
- return bin::exports(binary, funcs, globals, tables, memories);
-}
-
-} // namespace bin
-} // namespace wasm
-
-// BEGIN FILE wasm-v8-lowlevel.cc
-
-namespace v8 {
-namespace wasm {
-
-// Foreign pointers
-
-auto foreign_new(v8::Isolate* isolate, void* ptr) -> v8::Local<v8::Value> {
- auto foreign = v8::FromCData(reinterpret_cast<i::Isolate*>(isolate),
- reinterpret_cast<i::Address>(ptr));
- return v8::Utils::ToLocal(foreign);
-}
-
-auto foreign_get(v8::Local<v8::Value> val) -> void* {
- auto foreign = v8::Utils::OpenHandle(*val);
- if (!foreign->IsForeign()) return nullptr;
- auto addr = v8::ToCData<i::Address>(*foreign);
- return reinterpret_cast<void*>(addr);
-}
-
-// Types
-
-auto v8_valtype_to_wasm(i::wasm::ValueType v8_valtype) -> ::wasm::ValKind {
- switch (v8_valtype) {
- case i::wasm::kWasmI32:
- return ::wasm::I32;
- case i::wasm::kWasmI64:
- return ::wasm::I64;
- case i::wasm::kWasmF32:
- return ::wasm::F32;
- case i::wasm::kWasmF64:
- return ::wasm::F64;
- default:
- // TODO(wasm+): support new value types
- UNREACHABLE();
- }
-}
-
-i::wasm::ValueType wasm_valtype_to_v8(::wasm::ValKind type) {
- switch (type) {
- case ::wasm::I32:
- return i::wasm::kWasmI32;
- case ::wasm::I64:
- return i::wasm::kWasmI64;
- case ::wasm::F32:
- return i::wasm::kWasmF32;
- case ::wasm::F64:
- return i::wasm::kWasmF64;
- default:
- // TODO(wasm+): support new value types
+ case i::wasm::kExternalException:
UNREACHABLE();
+ return {};
}
}
-} // namespace wasm
-} // namespace v8
+} // namespace
// BEGIN FILE wasm-v8.cc
-namespace wasm {
-
///////////////////////////////////////////////////////////////////////////////
// Auxiliaries
@@ -695,6 +249,7 @@ void Engine::operator delete(void* p) { ::operator delete(p); }
auto Engine::make(own<Config*>&& config) -> own<Engine*> {
i::FLAG_expose_gc = true;
+ i::FLAG_experimental_wasm_anyref = true;
i::FLAG_experimental_wasm_bigint = true;
i::FLAG_experimental_wasm_mv = true;
auto engine = new (std::nothrow) EngineImpl;
@@ -714,7 +269,6 @@ StoreImpl::~StoreImpl() {
v8::kGCCallbackFlagForced);
#endif
context()->Exit();
- isolate_->Exit();
isolate_->Dispose();
delete create_params_.array_buffer_allocator;
}
@@ -739,7 +293,6 @@ auto Store::make(Engine*) -> own<Store*> {
if (!isolate) return own<Store*>();
{
- v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
// Create context.
@@ -750,8 +303,10 @@ auto Store::make(Engine*) -> own<Store*> {
store->isolate_ = isolate;
store->context_ = v8::Eternal<v8::Context>(isolate, context);
}
-
- store->isolate()->Enter();
+ // We intentionally do not call isolate->Enter() here, because that would
+ // prevent embedders from using stores with overlapping but non-nested
+ // lifetimes. The consequence is that Isolate::Current() is dysfunctional
+ // and hence must not be called by anything reachable via this file.
store->context()->Enter();
isolate->SetData(0, store.get());
@@ -831,7 +386,8 @@ struct FuncTypeImpl : ExternTypeImpl {
vec<ValType*> params;
vec<ValType*> results;
- FuncTypeImpl(vec<ValType*>& params, vec<ValType*>& results)
+ FuncTypeImpl(vec<ValType*>& params, // NOLINT(runtime/references)
+ vec<ValType*>& results) // NOLINT(runtime/references)
: ExternTypeImpl(EXTERN_FUNC),
params(std::move(params)),
results(std::move(results)) {}
@@ -884,7 +440,8 @@ struct GlobalTypeImpl : ExternTypeImpl {
own<ValType*> content;
Mutability mutability;
- GlobalTypeImpl(own<ValType*>& content, Mutability mutability)
+ GlobalTypeImpl(own<ValType*>& content, // NOLINT(runtime/references)
+ Mutability mutability)
: ExternTypeImpl(EXTERN_GLOBAL),
content(std::move(content)),
mutability(mutability) {}
@@ -936,7 +493,8 @@ struct TableTypeImpl : ExternTypeImpl {
own<ValType*> element;
Limits limits;
- TableTypeImpl(own<ValType*>& element, Limits limits)
+ TableTypeImpl(own<ValType*>& element, // NOLINT(runtime/references)
+ Limits limits)
: ExternTypeImpl(EXTERN_TABLE),
element(std::move(element)),
limits(limits) {}
@@ -1028,7 +586,9 @@ struct ImportTypeImpl {
Name name;
own<ExternType*> type;
- ImportTypeImpl(Name& module, Name& name, own<ExternType*>& type)
+ ImportTypeImpl(Name& module, // NOLINT(runtime/references)
+ Name& name, // NOLINT(runtime/references)
+ own<ExternType*>& type) // NOLINT(runtime/references)
: module(std::move(module)),
name(std::move(name)),
type(std::move(type)) {}
@@ -1071,7 +631,8 @@ struct ExportTypeImpl {
Name name;
own<ExternType*> type;
- ExportTypeImpl(Name& name, own<ExternType*>& type)
+ ExportTypeImpl(Name& name, // NOLINT(runtime/references)
+ own<ExternType*>& type) // NOLINT(runtime/references)
: name(std::move(name)), type(std::move(type)) {}
~ExportTypeImpl() {}
@@ -1103,89 +664,14 @@ auto ExportType::type() const -> const ExternType* {
return impl(this)->type.get();
}
-///////////////////////////////////////////////////////////////////////////////
-// Conversions of values from and to V8 objects
-
-auto val_to_v8(StoreImpl* store, const Val& v) -> v8::Local<v8::Value> {
- auto isolate = store->isolate();
- switch (v.kind()) {
- case I32:
- return v8::Integer::NewFromUnsigned(isolate, v.i32());
- case I64:
- return v8::BigInt::New(isolate, v.i64());
- case F32:
- return v8::Number::New(isolate, v.f32());
- case F64:
- return v8::Number::New(isolate, v.f64());
- case ANYREF:
- case FUNCREF: {
- if (v.ref() == nullptr) {
- return v8::Null(isolate);
- } else {
- WASM_UNIMPLEMENTED("ref value");
- }
- }
- default:
- UNREACHABLE();
- }
-}
-
-own<Val> v8_to_val(i::Isolate* isolate, i::Handle<i::Object> value,
- ValKind kind) {
- switch (kind) {
- case I32:
- do {
- if (value->IsSmi()) return Val(i::Smi::ToInt(*value));
- if (value->IsHeapNumber()) {
- return Val(i::DoubleToInt32(i::HeapNumber::cast(*value).value()));
- }
- value = i::Object::ToInt32(isolate, value).ToHandleChecked();
- // This will loop back at most once.
- } while (true);
- UNREACHABLE();
- case I64:
- if (value->IsBigInt()) return Val(i::BigInt::cast(*value).AsInt64());
- return Val(
- i::BigInt::FromObject(isolate, value).ToHandleChecked()->AsInt64());
- case F32:
- do {
- if (value->IsSmi()) {
- return Val(static_cast<float32_t>(i::Smi::ToInt(*value)));
- }
- if (value->IsHeapNumber()) {
- return Val(i::DoubleToFloat32(i::HeapNumber::cast(*value).value()));
- }
- value = i::Object::ToNumber(isolate, value).ToHandleChecked();
- // This will loop back at most once.
- } while (true);
- UNREACHABLE();
- case F64:
- do {
- if (value->IsSmi()) {
- return Val(static_cast<float64_t>(i::Smi::ToInt(*value)));
- }
- if (value->IsHeapNumber()) {
- return Val(i::HeapNumber::cast(*value).value());
- }
- value = i::Object::ToNumber(isolate, value).ToHandleChecked();
- // This will loop back at most once.
- } while (true);
- UNREACHABLE();
- case ANYREF:
- case FUNCREF: {
- if (value->IsNull(isolate)) {
- return Val(nullptr);
- } else {
- WASM_UNIMPLEMENTED("ref value");
- }
- }
- }
-}
-
i::Handle<i::String> VecToString(i::Isolate* isolate,
const vec<byte_t>& chars) {
+ size_t length = chars.size();
+ // Some, but not all, {chars} vectors we get here are null-terminated,
+ // so let's be robust to that.
+ if (length > 0 && chars[length - 1] == 0) length--;
return isolate->factory()
- ->NewStringFromUtf8({chars.get(), chars.size()})
+ ->NewStringFromUtf8({chars.get(), length})
.ToHandleChecked();
}
@@ -1327,11 +813,12 @@ Foreign::~Foreign() {}
auto Foreign::copy() const -> own<Foreign*> { return impl(this)->copy(); }
auto Foreign::make(Store* store_abs) -> own<Foreign*> {
- auto store = impl(store_abs);
- auto isolate = store->i_isolate();
+ StoreImpl* store = impl(store_abs);
+ i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
- auto obj = i::Handle<i::JSReceiver>();
+ i::Handle<i::JSObject> obj =
+ isolate->factory()->NewJSObject(isolate->object_function());
return implement<Foreign>::type::make(store, obj);
}
@@ -1379,22 +866,37 @@ auto Module::make(Store* store_abs, const vec<byte_t>& binary) -> own<Module*> {
}
auto Module::imports() const -> vec<ImportType*> {
- i::Vector<const uint8_t> wire_bytes =
- impl(this)->v8_object()->native_module()->wire_bytes();
- vec<const byte_t> binary = vec<const byte_t>::adopt(
- wire_bytes.size(), reinterpret_cast<const byte_t*>(wire_bytes.begin()));
- auto imports = wasm::bin::imports(binary);
- binary.release();
+ const i::wasm::NativeModule* native_module =
+ impl(this)->v8_object()->native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ const i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ const std::vector<i::wasm::WasmImport>& import_table = module->import_table;
+ size_t size = import_table.size();
+ vec<ImportType*> imports = vec<ImportType*>::make_uninitialized(size);
+ for (uint32_t i = 0; i < size; i++) {
+ const i::wasm::WasmImport& imp = import_table[i];
+ Name module_name = GetNameFromWireBytes(imp.module_name, wire_bytes);
+ Name name = GetNameFromWireBytes(imp.field_name, wire_bytes);
+ own<ExternType*> type = GetImportExportType(module, imp.kind, imp.index);
+ imports[i] = ImportType::make(std::move(module_name), std::move(name),
+ std::move(type));
+ }
return imports;
}
vec<ExportType*> ExportsImpl(i::Handle<i::WasmModuleObject> module_obj) {
- i::Vector<const uint8_t> wire_bytes =
- module_obj->native_module()->wire_bytes();
- vec<const byte_t> binary = vec<const byte_t>::adopt(
- wire_bytes.size(), reinterpret_cast<const byte_t*>(wire_bytes.begin()));
- auto exports = wasm::bin::exports(binary);
- binary.release();
+ const i::wasm::NativeModule* native_module = module_obj->native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ const i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ const std::vector<i::wasm::WasmExport>& export_table = module->export_table;
+ size_t size = export_table.size();
+ vec<ExportType*> exports = vec<ExportType*>::make_uninitialized(size);
+ for (uint32_t i = 0; i < size; i++) {
+ const i::wasm::WasmExport& exp = export_table[i];
+ Name name = GetNameFromWireBytes(exp.name, wire_bytes);
+ own<ExternType*> type = GetImportExportType(module, exp.kind, exp.index);
+ exports[i] = ExportType::make(std::move(name), std::move(type));
+ }
return exports;
}
@@ -1430,7 +932,7 @@ auto Module::deserialize(Store* store_abs, const vec<byte_t>& serialized)
i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
const byte_t* ptr = serialized.get();
- uint64_t binary_size = wasm::bin::u64(ptr);
+ uint64_t binary_size = ReadLebU64(&ptr);
ptrdiff_t size_size = ptr - serialized.get();
size_t serial_size = serialized.size() - size_size - binary_size;
i::Handle<i::WasmModuleObject> module_obj;
@@ -1597,16 +1099,14 @@ class SignatureHelper : public i::AllStatic {
int index = 0;
// TODO(jkummerow): Consider making vec<> range-based for-iterable.
for (size_t i = 0; i < type->results().size(); i++) {
- sig->set(index++,
- v8::wasm::wasm_valtype_to_v8(type->results()[i]->kind()));
+ sig->set(index++, WasmValKindToV8(type->results()[i]->kind()));
}
// {sig->set} needs to take the address of its second parameter,
// so we can't pass in the static const kMarker directly.
i::wasm::ValueType marker = kMarker;
sig->set(index++, marker);
for (size_t i = 0; i < type->params().size(); i++) {
- sig->set(index++,
- v8::wasm::wasm_valtype_to_v8(type->params()[i]->kind()));
+ sig->set(index++, WasmValKindToV8(type->params()[i]->kind()));
}
return sig;
}
@@ -1619,11 +1119,11 @@ class SignatureHelper : public i::AllStatic {
int i = 0;
for (; i < result_arity; ++i) {
- results[i] = ValType::make(v8::wasm::v8_valtype_to_wasm(sig.get(i)));
+ results[i] = ValType::make(V8ValueTypeToWasm(sig.get(i)));
}
i++; // Skip marker.
for (int p = 0; i < sig.length(); ++i, ++p) {
- params[p] = ValType::make(v8::wasm::v8_valtype_to_wasm(sig.get(i)));
+ params[p] = ValType::make(V8ValueTypeToWasm(sig.get(i)));
}
return FuncType::make(std::move(params), std::move(results));
}
@@ -1684,22 +1184,8 @@ auto Func::type() const -> own<FuncType*> {
DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*func));
i::Handle<i::WasmExportedFunction> function =
i::Handle<i::WasmExportedFunction>::cast(func);
- i::wasm::FunctionSig* sig =
- function->instance().module()->functions[function->function_index()].sig;
- uint32_t param_arity = static_cast<uint32_t>(sig->parameter_count());
- uint32_t result_arity = static_cast<uint32_t>(sig->return_count());
- auto params = vec<ValType*>::make_uninitialized(param_arity);
- auto results = vec<ValType*>::make_uninitialized(result_arity);
-
- for (size_t i = 0; i < params.size(); ++i) {
- auto kind = v8::wasm::v8_valtype_to_wasm(sig->GetParam(i));
- params[i] = ValType::make(kind);
- }
- for (size_t i = 0; i < results.size(); ++i) {
- auto kind = v8::wasm::v8_valtype_to_wasm(sig->GetReturn(i));
- results[i] = ValType::make(kind);
- }
- return FuncType::make(std::move(params), std::move(results));
+ return FunctionSigToFuncType(
+ function->instance().module()->functions[function->function_index()].sig);
}
auto Func::param_arity() const -> size_t {
@@ -1728,74 +1214,183 @@ auto Func::result_arity() const -> size_t {
return sig->return_count();
}
+namespace {
+
+void PrepareFunctionData(i::Isolate* isolate,
+ i::Handle<i::WasmExportedFunctionData> function_data,
+ i::wasm::FunctionSig* sig) {
+ // If the data is already populated, return immediately.
+ if (!function_data->c_wrapper_code().IsSmi()) return;
+ // Compile wrapper code.
+ i::Handle<i::Code> wrapper_code =
+ i::compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
+ function_data->set_c_wrapper_code(*wrapper_code);
+ // Compute packed args size.
+ function_data->set_packed_args_size(
+ i::wasm::CWasmArgumentsPacker::TotalSize(sig));
+ // Get call target (function table offset). This is an Address; we store
+ // it as a pseudo-Smi by shifting it by one bit so the GC leaves it alone.
+ i::Address call_target =
+ function_data->instance().GetCallTarget(function_data->function_index());
+ i::Smi smi_target((call_target << i::kSmiTagSize) | i::kSmiTag);
+ function_data->set_wasm_call_target(smi_target);
+}
+
+void PushArgs(i::wasm::FunctionSig* sig, const Val args[],
+ i::wasm::CWasmArgumentsPacker* packer) {
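+ // Write the arguments into the packer's flat buffer in the order given by
+ // {sig}; the C-wasm-entry stub reads them back from there.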
+ for (size_t i = 0; i < sig->parameter_count(); i++) {
+ i::wasm::ValueType type = sig->GetParam(i);
+ switch (type) {
+ case i::wasm::kWasmI32:
+ packer->Push(args[i].i32());
+ break;
+ case i::wasm::kWasmI64:
+ packer->Push(args[i].i64());
+ break;
+ case i::wasm::kWasmF32:
+ packer->Push(args[i].f32());
+ break;
+ case i::wasm::kWasmF64:
+ packer->Push(args[i].f64());
+ break;
+ case i::wasm::kWasmAnyRef:
+ case i::wasm::kWasmFuncRef:
+ packer->Push(impl(args[i].ref())->v8_object()->ptr());
+ break;
+ case i::wasm::kWasmExnRef:
+ // TODO(jkummerow): Implement these.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+}
+
+void PopArgs(i::wasm::FunctionSig* sig, Val results[],
+ i::wasm::CWasmArgumentsPacker* packer, StoreImpl* store) {
+ packer->Reset();
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ i::wasm::ValueType type = sig->GetReturn(i);
+ switch (type) {
+ case i::wasm::kWasmI32:
+ results[i] = Val(packer->Pop<int32_t>());
+ break;
+ case i::wasm::kWasmI64:
+ results[i] = Val(packer->Pop<int64_t>());
+ break;
+ case i::wasm::kWasmF32:
+ results[i] = Val(packer->Pop<float>());
+ break;
+ case i::wasm::kWasmF64:
+ results[i] = Val(packer->Pop<double>());
+ break;
+ case i::wasm::kWasmAnyRef:
+ case i::wasm::kWasmFuncRef: {
+ i::Address raw = packer->Pop<i::Address>();
+ if (raw == i::kNullAddress) {
+ results[i] = Val(nullptr);
+ } else {
+ i::JSReceiver raw_obj = i::JSReceiver::cast(i::Object(raw));
+ i::Handle<i::JSReceiver> obj(raw_obj, store->i_isolate());
+ results[i] = Val(implement<Ref>::type::make(store, obj));
+ }
+ break;
+ }
+ case i::wasm::kWasmExnRef:
+ // TODO(jkummerow): Implement these.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+}
+
+own<Trap*> CallWasmCapiFunction(i::WasmCapiFunctionData data, const Val args[],
+ Val results[]) {
+ FuncData* func_data = reinterpret_cast<FuncData*>(data.embedder_data());
+ if (func_data->kind == FuncData::kCallback) {
+ return (func_data->callback)(args, results);
+ }
+ DCHECK(func_data->kind == FuncData::kCallbackWithEnv);
+ return (func_data->callback_with_env)(func_data->env, args, results);
+}
+
+} // namespace
+
auto Func::call(const Val args[], Val results[]) const -> own<Trap*> {
auto func = impl(this);
auto store = func->store();
- auto isolate = store->isolate();
- auto i_isolate = store->i_isolate();
- v8::HandleScope handle_scope(isolate);
-
- int num_params;
- int num_results;
- ValKind result_kind;
- i::Handle<i::JSFunction> v8_func = func->v8_object();
- if (i::WasmExportedFunction::IsWasmExportedFunction(*v8_func)) {
- i::WasmExportedFunction wef = i::WasmExportedFunction::cast(*v8_func);
- i::wasm::FunctionSig* sig =
- wef.instance().module()->functions[wef.function_index()].sig;
- num_params = static_cast<int>(sig->parameter_count());
- num_results = static_cast<int>(sig->return_count());
- if (num_results > 0) {
- result_kind = v8::wasm::v8_valtype_to_wasm(sig->GetReturn(0));
- }
-#if DEBUG
- for (int i = 0; i < num_params; i++) {
- DCHECK_EQ(args[i].kind(), v8::wasm::v8_valtype_to_wasm(sig->GetParam(i)));
+ auto isolate = store->i_isolate();
+ i::HandleScope handle_scope(isolate);
+ i::Object raw_function_data = func->v8_object()->shared().function_data();
+
+ // WasmCapiFunctions can be called directly.
+ if (raw_function_data.IsWasmCapiFunctionData()) {
+ return CallWasmCapiFunction(
+ i::WasmCapiFunctionData::cast(raw_function_data), args, results);
+ }
+
+ DCHECK(raw_function_data.IsWasmExportedFunctionData());
+ i::Handle<i::WasmExportedFunctionData> function_data(
+ i::WasmExportedFunctionData::cast(raw_function_data), isolate);
+ i::Handle<i::WasmInstanceObject> instance(function_data->instance(), isolate);
+ int function_index = function_data->function_index();
+ // Caching {sig} would give a ~10% reduction in overhead.
+ i::wasm::FunctionSig* sig = instance->module()->functions[function_index].sig;
+ PrepareFunctionData(isolate, function_data, sig);
+ i::Handle<i::Code> wrapper_code = i::Handle<i::Code>(
+ i::Code::cast(function_data->c_wrapper_code()), isolate);
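+ // Undo the pseudo-Smi encoding applied in PrepareFunctionData above.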
+ i::Address call_target =
+ function_data->wasm_call_target().ptr() >> i::kSmiTagSize;
+
+ i::wasm::CWasmArgumentsPacker packer(function_data->packed_args_size());
+ PushArgs(sig, args, &packer);
+
+ i::Handle<i::Object> object_ref = instance;
+ if (function_index <
+ static_cast<int>(instance->module()->num_imported_functions)) {
+ object_ref = i::handle(
+ instance->imported_function_refs().get(function_index), isolate);
+ if (object_ref->IsTuple2()) {
+ i::JSFunction jsfunc =
+ i::JSFunction::cast(i::Tuple2::cast(*object_ref).value2());
+ i::Object data = jsfunc.shared().function_data();
+ if (data.IsWasmCapiFunctionData()) {
+ return CallWasmCapiFunction(i::WasmCapiFunctionData::cast(data), args,
+ results);
+ }
+ // TODO(jkummerow): Imported and then re-exported JavaScript functions
+ // are not supported yet. If we support C-API + JavaScript, we'll need
+ // to call those here.
+ UNIMPLEMENTED();
+ } else {
+ // A WasmFunction from another module.
+ DCHECK(object_ref->IsWasmInstanceObject());
}
-#endif
- } else {
- DCHECK(i::WasmCapiFunction::IsWasmCapiFunction(*v8_func));
- UNIMPLEMENTED();
- }
- // TODO(rossberg): cache v8_args array per thread.
- auto v8_args = std::unique_ptr<i::Handle<i::Object>[]>(
- new (std::nothrow) i::Handle<i::Object>[num_params]);
- for (int i = 0; i < num_params; ++i) {
- v8_args[i] = v8::Utils::OpenHandle(*val_to_v8(store, args[i]));
- }
-
- // TODO(jkummerow): Use Execution::TryCall instead of manual TryCatch.
- v8::TryCatch handler(isolate);
- i::MaybeHandle<i::Object> maybe_val = i::Execution::Call(
- i_isolate, func->v8_object(), i_isolate->factory()->undefined_value(),
- num_params, v8_args.get());
-
- if (handler.HasCaught()) {
- i_isolate->OptionalRescheduleException(true);
- i::Handle<i::Object> exception =
- v8::Utils::OpenHandle(*handler.Exception());
+ }
+
+ i::Execution::CallWasm(isolate, wrapper_code, call_target, object_ref,
+ packer.argv());
+
+ if (isolate->has_pending_exception()) {
+ i::Handle<i::Object> exception(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
if (!exception->IsJSReceiver()) {
i::MaybeHandle<i::String> maybe_string =
- i::Object::ToString(i_isolate, exception);
+ i::Object::ToString(isolate, exception);
i::Handle<i::String> string = maybe_string.is_null()
- ? i_isolate->factory()->empty_string()
+ ? isolate->factory()->empty_string()
: maybe_string.ToHandleChecked();
exception =
- i_isolate->factory()->NewError(i_isolate->error_function(), string);
+ isolate->factory()->NewError(isolate->error_function(), string);
}
return implement<Trap>::type::make(
store, i::Handle<i::JSReceiver>::cast(exception));
}
- auto val = maybe_val.ToHandleChecked();
- if (num_results == 0) {
- assert(val->IsUndefined(i_isolate));
- } else if (num_results == 1) {
- assert(!val->IsUndefined(i_isolate));
- new (&results[0]) Val(v8_to_val(i_isolate, val, result_kind));
- } else {
- WASM_UNIMPLEMENTED("multiple results");
- }
+ PopArgs(sig, results, &packer, store);
return nullptr;
}
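The rewritten Func::call above is what every C-API invocation of an exported function goes through. As a rough embedder-side sketch only (the "wasm.hh" header name, the (i32, i32) -> i32 signature, and the smart-pointer behaviour of own<> are assumptions for illustration, not taken from this patch):

#include "wasm.hh"  // The C++ API header this file implements.

// Hypothetical embedder helper; assumes {func} has type (i32, i32) -> i32.
int32_t CallAdd(const wasm::Func* func) {
  wasm::Val args[2] = {wasm::Val(int32_t{3}), wasm::Val(int32_t{4})};
  wasm::Val results[1];
  wasm::own<wasm::Trap*> trap = func->call(args, results);
  if (trap) return -1;  // Func::call turned a pending V8 exception into {trap}.
  return results[0].i32();  // Written by PopArgs above.
}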
@@ -1814,24 +1409,24 @@ i::Address FuncData::v8_callback(void* data, i::Address argv) {
for (int i = 0; i < num_param_types; ++i) {
switch (param_types[i]->kind()) {
case I32:
- params[i] = Val(i::ReadUnalignedValue<int32_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<int32_t>(p));
p += 4;
break;
case I64:
- params[i] = Val(i::ReadUnalignedValue<int64_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<int64_t>(p));
p += 8;
break;
case F32:
- params[i] = Val(i::ReadUnalignedValue<float32_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<float32_t>(p));
p += 4;
break;
case F64:
- params[i] = Val(i::ReadUnalignedValue<float64_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<float64_t>(p));
p += 8;
break;
case ANYREF:
case FUNCREF: {
- i::Address raw = i::ReadUnalignedValue<i::Address>(p);
+ i::Address raw = v8::base::ReadUnalignedValue<i::Address>(p);
p += sizeof(raw);
if (raw == i::kNullAddress) {
params[i] = Val(nullptr);
@@ -1864,27 +1459,28 @@ i::Address FuncData::v8_callback(void* data, i::Address argv) {
for (int i = 0; i < num_result_types; ++i) {
switch (result_types[i]->kind()) {
case I32:
- i::WriteUnalignedValue(p, results[i].i32());
+ v8::base::WriteUnalignedValue(p, results[i].i32());
p += 4;
break;
case I64:
- i::WriteUnalignedValue(p, results[i].i64());
+ v8::base::WriteUnalignedValue(p, results[i].i64());
p += 8;
break;
case F32:
- i::WriteUnalignedValue(p, results[i].f32());
+ v8::base::WriteUnalignedValue(p, results[i].f32());
p += 4;
break;
case F64:
- i::WriteUnalignedValue(p, results[i].f64());
+ v8::base::WriteUnalignedValue(p, results[i].f64());
p += 8;
break;
case ANYREF:
case FUNCREF: {
if (results[i].ref() == nullptr) {
- i::WriteUnalignedValue(p, i::kNullAddress);
+ v8::base::WriteUnalignedValue(p, i::kNullAddress);
} else {
- i::WriteUnalignedValue(p, impl(results[i].ref())->v8_object()->ptr());
+ v8::base::WriteUnalignedValue(
+ p, impl(results[i].ref())->v8_object()->ptr());
}
p += sizeof(i::Address);
break;
@@ -1917,8 +1513,7 @@ auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
DCHECK_EQ(type->content()->kind(), val.kind());
- i::wasm::ValueType i_type =
- v8::wasm::wasm_valtype_to_v8(type->content()->kind());
+ i::wasm::ValueType i_type = WasmValKindToV8(type->content()->kind());
bool is_mutable = (type->mutability() == VAR);
const int32_t offset = 0;
i::Handle<i::WasmGlobalObject> obj =
@@ -1935,7 +1530,7 @@ auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
auto Global::type() const -> own<GlobalType*> {
i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
- ValKind kind = v8::wasm::v8_valtype_to_wasm(v8_global->type());
+ ValKind kind = V8ValueTypeToWasm(v8_global->type());
Mutability mutability = v8_global->is_mutable() ? VAR : CONST;
return GlobalType::make(ValType::make(kind), mutability);
}
@@ -1951,9 +1546,16 @@ auto Global::get() const -> Val {
return Val(v8_global->GetF32());
case F64:
return Val(v8_global->GetF64());
- case ANYREF:
- case FUNCREF:
- WASM_UNIMPLEMENTED("globals of reference type");
+ case ANYREF: {
+ i::Handle<i::JSReceiver> obj =
+ i::Handle<i::JSReceiver>::cast(v8_global->GetRef());
+ return Val(RefImpl<Ref, i::JSReceiver>::make(impl(this)->store(), obj));
+ }
+ case FUNCREF: {
+ i::Handle<i::JSFunction> obj =
+ i::Handle<i::JSFunction>::cast(v8_global->GetRef());
+ return Val(implement<Func>::type::make(impl(this)->store(), obj));
+ }
default:
// TODO(wasm+): support new value types
UNREACHABLE();
@@ -1972,8 +1574,14 @@ void Global::set(const Val& val) {
case F64:
return v8_global->SetF64(val.f64());
case ANYREF:
- case FUNCREF:
- WASM_UNIMPLEMENTED("globals of reference type");
+ return v8_global->SetAnyRef(impl(val.ref())->v8_object());
+ case FUNCREF: {
+ bool result = v8_global->SetFuncRef(impl(this)->store()->i_isolate(),
+ impl(val.ref())->v8_object());
+ DCHECK(result);
+ USE(result);
+ return;
+ }
default:
// TODO(wasm+): support new value types
UNREACHABLE();
@@ -2002,7 +1610,7 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
i::wasm::ValueType i_type;
switch (type->element()->kind()) {
case FUNCREF:
- i_type = i::wasm::kWasmAnyFunc;
+ i_type = i::wasm::kWasmFuncRef;
break;
case ANYREF:
if (enabled_features.anyref) {
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 440267bd25..abb7b8ee86 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -10,8 +10,8 @@
#include <memory>
#include "src/base/compiler-specific.h"
+#include "src/base/memory.h"
#include "src/codegen/signature.h"
-#include "src/common/v8memory.h"
#include "src/flags/flags.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
@@ -299,7 +299,7 @@ class Decoder {
} else if (!validate_size(pc, sizeof(IntType), msg)) {
return IntType{0};
}
- return ReadLittleEndianValue<IntType>(reinterpret_cast<Address>(pc));
+ return base::ReadLittleEndianValue<IntType>(reinterpret_cast<Address>(pc));
}
template <typename IntType>
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index eb895a25b3..9f1ca23c62 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -13,6 +13,7 @@
#include "src/utils/bit-vector.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/value-type.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -64,7 +65,7 @@ struct WasmException;
#define ATOMIC_OP_LIST(V) \
V(AtomicNotify, Uint32) \
V(I32AtomicWait, Uint32) \
- V(I64AtomicWait, Uint32) \
+ V(I64AtomicWait, Uint64) \
V(I32AtomicLoad, Uint32) \
V(I64AtomicLoad, Uint64) \
V(I32AtomicLoad8U, Uint8) \
@@ -229,17 +230,17 @@ inline bool decode_local_type(uint8_t val, ValueType* result) {
case kLocalS128:
*result = kWasmS128;
return true;
- case kLocalAnyFunc:
- *result = kWasmAnyFunc;
+ case kLocalFuncRef:
+ *result = kWasmFuncRef;
return true;
case kLocalAnyRef:
*result = kWasmAnyRef;
return true;
- case kLocalExceptRef:
- *result = kWasmExceptRef;
+ case kLocalExnRef:
+ *result = kWasmExnRef;
return true;
default:
- *result = kWasmVar;
+ *result = kWasmBottom;
return false;
}
}
@@ -296,20 +297,20 @@ struct BlockTypeImmediate {
}
uint32_t in_arity() const {
- if (type != kWasmVar) return 0;
+ if (type != kWasmBottom) return 0;
return static_cast<uint32_t>(sig->parameter_count());
}
uint32_t out_arity() const {
if (type == kWasmStmt) return 0;
- if (type != kWasmVar) return 1;
+ if (type != kWasmBottom) return 1;
return static_cast<uint32_t>(sig->return_count());
}
ValueType in_type(uint32_t index) {
- DCHECK_EQ(kWasmVar, type);
+ DCHECK_EQ(kWasmBottom, type);
return sig->GetParam(index);
}
ValueType out_type(uint32_t index) {
- if (type == kWasmVar) return sig->GetReturn(index);
+ if (type == kWasmBottom) return sig->GetReturn(index);
DCHECK_NE(kWasmStmt, type);
DCHECK_EQ(0, index);
return type;
@@ -573,14 +574,14 @@ struct ElemDropImmediate {
template <Decoder::ValidateFlag validate>
struct TableCopyImmediate {
- TableIndexImmediate<validate> table_src;
TableIndexImmediate<validate> table_dst;
+ TableIndexImmediate<validate> table_src;
unsigned length = 0;
inline TableCopyImmediate(Decoder* decoder, const byte* pc) {
- table_src = TableIndexImmediate<validate>(decoder, pc + 1);
- table_dst =
- TableIndexImmediate<validate>(decoder, pc + 1 + table_src.length);
+ table_dst = TableIndexImmediate<validate>(decoder, pc + 1);
+ table_src =
+ TableIndexImmediate<validate>(decoder, pc + 1 + table_dst.length);
length = table_src.length + table_dst.length;
}
};
@@ -718,9 +719,9 @@ struct ControlBase {
const LocalIndexImmediate<validate>& imm) \
F(GetGlobal, Value* result, const GlobalIndexImmediate<validate>& imm) \
F(SetGlobal, const Value& value, const GlobalIndexImmediate<validate>& imm) \
- F(GetTable, const Value& index, Value* result, \
+ F(TableGet, const Value& index, Value* result, \
const TableIndexImmediate<validate>& imm) \
- F(SetTable, const Value& index, const Value& value, \
+ F(TableSet, const Value& index, const Value& value, \
const TableIndexImmediate<validate>& imm) \
F(Unreachable) \
F(Select, const Value& cond, const Value& fval, const Value& tval, \
@@ -759,6 +760,7 @@ struct ControlBase {
Vector<Value> values) \
F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
const MemoryAccessImmediate<validate>& imm, Value* result) \
+ F(AtomicFence) \
F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
const Value& src, const Value& size) \
F(DataDrop, const DataDropImmediate<validate>& imm) \
@@ -849,18 +851,18 @@ class WasmDecoder : public Decoder {
}
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
- case kLocalAnyFunc:
+ case kLocalFuncRef:
if (enabled.anyref) {
- type = kWasmAnyFunc;
+ type = kWasmFuncRef;
break;
}
decoder->error(decoder->pc() - 1,
- "local type 'anyfunc' is not enabled with "
+ "local type 'funcref' is not enabled with "
"--experimental-wasm-anyref");
return false;
- case kLocalExceptRef:
+ case kLocalExnRef:
if (enabled.eh) {
- type = kWasmExceptRef;
+ type = kWasmExnRef;
break;
}
decoder->error(decoder->pc() - 1, "invalid local type");
@@ -1015,8 +1017,8 @@ class WasmDecoder : public Decoder {
return false;
}
if (!VALIDATE(module_ != nullptr &&
- module_->tables[imm.table_index].type == kWasmAnyFunc)) {
- error("table of call_indirect must be of type anyfunc");
+ module_->tables[imm.table_index].type == kWasmFuncRef)) {
+ error("table of call_indirect must be of type funcref");
return false;
}
if (!Complete(pc, imm)) {
@@ -1049,6 +1051,12 @@ class WasmDecoder : public Decoder {
SimdLaneImmediate<validate>& imm) {
uint8_t num_lanes = 0;
switch (opcode) {
+ case kExprF64x2ExtractLane:
+ case kExprF64x2ReplaceLane:
+ case kExprI64x2ExtractLane:
+ case kExprI64x2ReplaceLane:
+ num_lanes = 2;
+ break;
case kExprF32x4ExtractLane:
case kExprF32x4ReplaceLane:
case kExprI32x4ExtractLane:
@@ -1079,6 +1087,11 @@ class WasmDecoder : public Decoder {
SimdShiftImmediate<validate>& imm) {
uint8_t max_shift = 0;
switch (opcode) {
+ case kExprI64x2Shl:
+ case kExprI64x2ShrS:
+ case kExprI64x2ShrU:
+ max_shift = 64;
+ break;
case kExprI32x4Shl:
case kExprI32x4ShrS:
case kExprI32x4ShrU:
@@ -1121,7 +1134,7 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(BlockTypeImmediate<validate>& imm) {
- if (imm.type != kWasmVar) return true;
+ if (imm.type != kWasmBottom) return true;
if (!VALIDATE(module_ && imm.sig_index < module_->signatures.size())) {
return false;
}
@@ -1238,8 +1251,8 @@ class WasmDecoder : public Decoder {
GlobalIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
- case kExprGetTable:
- case kExprSetTable: {
+ case kExprTableGet:
+ case kExprTableSet: {
TableIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
@@ -1405,6 +1418,12 @@ class WasmDecoder : public Decoder {
MemoryAccessImmediate<validate> imm(decoder, pc + 1, UINT32_MAX);
return 2 + imm.length;
}
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ {
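+ // One prefix byte + one opcode byte + one reserved (zero) byte.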
+ return 2 + 1;
+ }
default:
decoder->error(pc, "invalid Atomics opcode");
return 2;
@@ -1428,11 +1447,11 @@ class WasmDecoder : public Decoder {
case kExprSelect:
case kExprSelectWithType:
return {3, 1};
- case kExprSetTable:
+ case kExprTableSet:
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
return {2, 0};
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
- case kExprGetTable:
+ case kExprTableGet:
case kExprTeeLocal:
case kExprMemoryGrow:
return {1, 1};
@@ -1536,7 +1555,6 @@ template <Decoder::ValidateFlag validate, typename Interface>
class WasmFullDecoder : public WasmDecoder<validate> {
using Value = typename Interface::Value;
using Control = typename Interface::Control;
- using MergeValues = Merge<Value>;
using ArgVector = base::SmallVector<Value, 8>;
// All Value types should be trivially copyable for performance. We push, pop,
@@ -1658,7 +1676,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
static Value UnreachableValue(const uint8_t* pc) {
- return Value{pc, kWasmVar};
+ return Value{pc, kWasmBottom};
}
bool CheckHasMemory() {
@@ -1760,7 +1778,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprRethrow: {
CHECK_PROTOTYPE_OPCODE(eh);
- auto exception = Pop(0, kWasmExceptRef);
+ auto exception = Pop(0, kWasmExnRef);
CALL_INTERFACE_IF_REACHABLE(Rethrow, exception);
EndControl();
break;
@@ -1806,7 +1824,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
FallThruTo(c);
stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
c->reachability = control_at(1)->innerReachability();
- auto* exception = Push(kWasmExceptRef);
+ auto* exception = Push(kWasmExnRef);
CALL_INTERFACE_IF_PARENT_REACHABLE(Catch, c, exception);
break;
}
@@ -1816,7 +1834,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_, imm.depth, control_.size())) break;
if (!this->Validate(this->pc_ + imm.depth.length, imm.index)) break;
Control* c = control_at(imm.depth.depth);
- auto exception = Pop(0, kWasmExceptRef);
+ auto exception = Pop(0, kWasmExnRef);
const WasmExceptionSig* sig = imm.index.exception->sig;
size_t value_count = sig->parameter_count();
// TODO(mstarzinger): This operand stack mutation is an ugly hack to
@@ -1825,15 +1843,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// special handling for both and do minimal/no stack mutation here.
for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
Vector<Value> values(stack_.data() + c->stack_depth, value_count);
- if (!TypeCheckBranch(c)) break;
- if (control_.back().reachable()) {
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE(BrOnException, exception, imm.index, imm.depth.depth,
values);
c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ break;
}
len = 1 + imm.length;
for (size_t i = 0; i < value_count; ++i) Pop();
- auto* pexception = Push(kWasmExceptRef);
+ auto* pexception = Push(kWasmExnRef);
*pexception = exception;
break;
}
@@ -1875,7 +1895,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error(this->pc_, "else already present for if");
break;
}
- if (!TypeCheckFallThru(c)) break;
+ if (!TypeCheckFallThru()) break;
c->kind = kControlIfElse;
CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
if (c->reachable()) c->end_merge.reached = true;
@@ -1902,7 +1922,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- if (!TypeCheckFallThru(c)) break;
+ if (!TypeCheckFallThru()) break;
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
@@ -1917,7 +1937,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
control_.clear();
break;
}
-
PopControl(c);
break;
}
@@ -1925,8 +1944,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto cond = Pop(2, kWasmI32);
auto fval = Pop();
auto tval = Pop(0, fval.type);
- ValueType type = tval.type == kWasmVar ? fval.type : tval.type;
- if (ValueTypes::IsSubType(kWasmAnyRef, type)) {
+ ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
+ if (ValueTypes::IsSubType(type, kWasmAnyRef)) {
this->error(
"select without type is only valid for value type inputs");
break;
@@ -1951,12 +1970,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BranchDepthImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm, control_.size())) break;
Control* c = control_at(imm.depth);
- if (!TypeCheckBranch(c)) break;
- if (imm.depth == control_.size() - 1) {
- DoReturn();
- } else if (control_.back().reachable()) {
- CALL_INTERFACE(Br, c);
- c->br_merge()->reached = true;
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, false);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ if (imm.depth == control_.size() - 1) {
+ DoReturn();
+ } else {
+ CALL_INTERFACE(Br, c);
+ c->br_merge()->reached = true;
+ }
+ } else if (check_result == kInvalidStack) {
+ break;
}
len = 1 + imm.length;
EndControl();
@@ -1968,10 +1991,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (this->failed()) break;
if (!this->Validate(this->pc_, imm, control_.size())) break;
Control* c = control_at(imm.depth);
- if (!TypeCheckBranch(c)) break;
- if (control_.back().reachable()) {
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE(BrIf, cond, imm.depth);
c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ break;
}
len = 1 + imm.length;
break;
@@ -1982,42 +2007,45 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto key = Pop(0, kWasmI32);
if (this->failed()) break;
if (!this->Validate(this->pc_, imm, control_.size())) break;
- uint32_t br_arity = 0;
+
+ // Cache the branch targets during the iteration, so that we can set
+ // all branch targets as reachable after the {CALL_INTERFACE} call.
std::vector<bool> br_targets(control_.size());
+
+ // The result types of the br_table instruction. We have to check the
+ // stack against these types. Only needed during validation.
+ std::vector<ValueType> result_types;
+
while (iterator.has_next()) {
- const uint32_t i = iterator.cur_index();
+ const uint32_t index = iterator.cur_index();
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
- if (!VALIDATE(target < control_.size())) {
- this->errorf(pos,
- "improper branch in br_table target %u (depth %u)",
- i, target);
- break;
- }
+ if (!VALIDATE(ValidateBrTableTarget(target, pos, index))) break;
// Avoid redundant branch target checks.
if (br_targets[target]) continue;
br_targets[target] = true;
- // Check that label types match up.
- Control* c = control_at(target);
- uint32_t arity = c->br_merge()->arity;
- if (i == 0) {
- br_arity = arity;
- } else if (!VALIDATE(br_arity == arity)) {
- this->errorf(pos,
- "inconsistent arity in br_table target %u"
- " (previous was %u, this one %u)",
- i, br_arity, arity);
+
+ if (validate) {
+ if (index == 0) {
+ // With the first branch target, initialize the result types.
+ result_types = InitializeBrTableResultTypes(target);
+ } else if (!UpdateBrTableResultTypes(&result_types, target, pos,
+ index)) {
+ break;
+ }
}
- if (!TypeCheckBranch(c)) break;
}
- if (this->failed()) break;
+
+ if (!VALIDATE(TypeCheckBrTable(result_types))) break;
+
+ DCHECK(this->ok());
if (control_.back().reachable()) {
CALL_INTERFACE(BrTable, imm, key);
- for (uint32_t depth = control_depth(); depth-- > 0;) {
- if (!br_targets[depth]) continue;
- control_at(depth)->br_merge()->reached = true;
+ for (int i = 0, e = control_depth(); i < e; ++i) {
+ if (!br_targets[i]) continue;
+ control_at(i)->br_merge()->reached = true;
}
}
@@ -2026,8 +2054,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprReturn: {
- if (!TypeCheckReturn()) break;
- DoReturn();
+ if (V8_LIKELY(control_.back().reachable())) {
+ if (!VALIDATE(TypeCheckReturn())) break;
+ DoReturn();
+ } else {
+ // We pop all return values from the stack to check their type.
+ // Since we deal with unreachable code, we do not have to keep the
+ // values.
+ int num_returns = static_cast<int>(this->sig_->return_count());
+ for (int i = 0; i < num_returns; ++i) {
+ Pop(i, this->sig_->GetReturn(i));
+ }
+ }
+
EndControl();
break;
}
@@ -2075,7 +2114,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(anyref);
FunctionIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
- auto* value = Push(kWasmAnyFunc);
+ auto* value = Push(kWasmFuncRef);
CALL_INTERFACE_IF_REACHABLE(RefFunc, imm.index, value);
len = 1 + imm.length;
break;
@@ -2131,7 +2170,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm);
break;
}
- case kExprGetTable: {
+ case kExprTableGet: {
CHECK_PROTOTYPE_OPCODE(anyref);
TableIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
@@ -2139,17 +2178,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK_NOT_NULL(this->module_);
auto index = Pop(0, kWasmI32);
auto* result = Push(this->module_->tables[imm.index].type);
- CALL_INTERFACE_IF_REACHABLE(GetTable, index, result, imm);
+ CALL_INTERFACE_IF_REACHABLE(TableGet, index, result, imm);
break;
}
- case kExprSetTable: {
+ case kExprTableSet: {
CHECK_PROTOTYPE_OPCODE(anyref);
TableIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
auto value = Pop(1, this->module_->tables[imm.index].type);
auto index = Pop(0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(SetTable, index, value, imm);
+ CALL_INTERFACE_IF_REACHABLE(TableSet, index, value, imm);
break;
}
@@ -2328,7 +2367,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kAtomicPrefix: {
CHECK_PROTOTYPE_OPCODE(threads);
- if (!CheckHasSharedMemory()) break;
len++;
byte atomic_index =
this->template read_u8<validate>(this->pc_ + 1, "atomic index");
@@ -2348,8 +2386,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
default: {
// Deal with special asmjs opcodes.
- if (this->module_ != nullptr &&
- this->module_->origin == kAsmJsOrigin) {
+ if (this->module_ != nullptr && is_asmjs_module(this->module_)) {
FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) {
BuildSimpleOperator(opcode, sig);
@@ -2520,6 +2557,90 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return imm.length;
}
+ bool ValidateBrTableTarget(uint32_t target, const byte* pos, int index) {
+ if (!VALIDATE(target < this->control_.size())) {
+ this->errorf(pos, "improper branch in br_table target %u (depth %u)",
+ index, target);
+ return false;
+ }
+ return true;
+ }
+
+ std::vector<ValueType> InitializeBrTableResultTypes(uint32_t target) {
+ auto* merge = control_at(target)->br_merge();
+ int br_arity = merge->arity;
+ std::vector<ValueType> result(br_arity);
+ for (int i = 0; i < br_arity; ++i) {
+ result[i] = (*merge)[i].type;
+ }
+ return result;
+ }
+
+ bool UpdateBrTableResultTypes(std::vector<ValueType>* result_types,
+ uint32_t target, const byte* pos, int index) {
+ auto* merge = control_at(target)->br_merge();
+ int br_arity = merge->arity;
+ // First we check if the arities match.
+ if (br_arity != static_cast<int>(result_types->size())) {
+ this->errorf(pos,
+ "inconsistent arity in br_table target %u (previous was "
+ "%zu, this one is %u)",
+ index, result_types->size(), br_arity);
+ return false;
+ }
+
+ for (int i = 0; i < br_arity; ++i) {
+ if (this->enabled_.anyref) {
+ // The expected type is the biggest common subtype of all targets.
+ (*result_types)[i] =
+ ValueTypes::CommonSubType((*result_types)[i], (*merge)[i].type);
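+ // E.g. merging targets that expect anyref and funcref yields funcref,
+ // since funcref is a subtype of anyref.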
+ } else {
+ // All targets must have the same signature.
+ if ((*result_types)[i] != (*merge)[i].type) {
+ this->errorf(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, ValueTypes::TypeName((*result_types)[i]),
+ ValueTypes::TypeName((*merge)[i].type));
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ bool TypeCheckBrTable(const std::vector<ValueType>& result_types) {
+ int br_arity = static_cast<int>(result_types.size());
+ if (V8_LIKELY(control_.back().reachable())) {
+ int available =
+ static_cast<int>(stack_.size()) - control_.back().stack_depth;
+ // There have to be enough values on the stack.
+ if (available < br_arity) {
+ this->errorf(this->pc_,
+ "expected %u elements on the stack for branch to "
+ "@%d, found %u",
+ br_arity, startrel(control_.back().pc), available);
+ return false;
+ }
+ Value* stack_values = &*(stack_.end() - br_arity);
+ // Type-check the topmost br_arity values on the stack.
+ for (int i = 0; i < br_arity; ++i) {
+ Value& val = stack_values[i];
+ if (!ValueTypes::IsSubType(val.type, result_types[i])) {
+ this->errorf(this->pc_,
+ "type error in merge[%u] (expected %s, got %s)", i,
+ ValueTypes::TypeName(result_types[i]),
+ ValueTypes::TypeName(val.type));
+ return false;
+ }
+ }
+ } else { // !control_.back().reachable()
+ // Pop values from the stack, according to the expected signature.
+ for (int i = 0; i < br_arity; ++i) Pop(i + 1, result_types[i]);
+ }
+ return this->ok();
+ }
+
uint32_t SimdExtractLane(WasmOpcode opcode, ValueType type) {
SimdLaneImmediate<validate> imm(this, this->pc_);
if (this->Validate(this->pc_, opcode, imm)) {
@@ -2570,26 +2691,45 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t DecodeSimdOpcode(WasmOpcode opcode) {
uint32_t len = 0;
switch (opcode) {
+ case kExprF64x2ExtractLane: {
+ len = SimdExtractLane(opcode, kWasmF64);
+ break;
+ }
case kExprF32x4ExtractLane: {
len = SimdExtractLane(opcode, kWasmF32);
break;
}
+ case kExprI64x2ExtractLane: {
+ len = SimdExtractLane(opcode, kWasmI64);
+ break;
+ }
case kExprI32x4ExtractLane:
case kExprI16x8ExtractLane:
case kExprI8x16ExtractLane: {
len = SimdExtractLane(opcode, kWasmI32);
break;
}
+ case kExprF64x2ReplaceLane: {
+ len = SimdReplaceLane(opcode, kWasmF64);
+ break;
+ }
case kExprF32x4ReplaceLane: {
len = SimdReplaceLane(opcode, kWasmF32);
break;
}
+ case kExprI64x2ReplaceLane: {
+ len = SimdReplaceLane(opcode, kWasmI64);
+ break;
+ }
case kExprI32x4ReplaceLane:
case kExprI16x8ReplaceLane:
case kExprI8x16ReplaceLane: {
len = SimdReplaceLane(opcode, kWasmI32);
break;
}
+ case kExprI64x2Shl:
+ case kExprI64x2ShrS:
+ case kExprI64x2ShrU:
case kExprI32x4Shl:
case kExprI32x4ShrS:
case kExprI32x4ShrU:
@@ -2631,16 +2771,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t len = 0;
ValueType ret_type;
FunctionSig* sig = WasmOpcodes::Signature(opcode);
- if (sig != nullptr) {
- MachineType memtype;
- switch (opcode) {
+ if (!VALIDATE(sig != nullptr)) {
+ this->error("invalid atomic opcode");
+ return 0;
+ }
+ MachineType memtype;
+ switch (opcode) {
#define CASE_ATOMIC_STORE_OP(Name, Type) \
case kExpr##Name: { \
memtype = MachineType::Type(); \
ret_type = kWasmStmt; \
break; \
}
- ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
+ ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
#undef CASE_ATOMIC_OP
#define CASE_ATOMIC_OP(Name, Type) \
case kExpr##Name: { \
@@ -2648,22 +2791,28 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ret_type = GetReturnType(sig); \
break; \
}
- ATOMIC_OP_LIST(CASE_ATOMIC_OP)
+ ATOMIC_OP_LIST(CASE_ATOMIC_OP)
#undef CASE_ATOMIC_OP
- default:
- this->error("invalid atomic opcode");
+ case kExprAtomicFence: {
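+ // Encoding: the prefixed opcode is followed by a single reserved byte
+ // that must currently be zero.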
+ byte zero = this->template read_u8<validate>(this->pc_ + 2, "zero");
+ if (!VALIDATE(zero == 0)) {
+ this->error(this->pc_ + 2, "invalid atomic operand");
return 0;
+ }
+ CALL_INTERFACE_IF_REACHABLE(AtomicFence);
+ return 1;
}
- MemoryAccessImmediate<validate> imm(
- this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
- len += imm.length;
- auto args = PopArgs(sig);
- auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
- CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm,
- result);
- } else {
- this->error("invalid atomic opcode");
- }
+ default:
+ this->error("invalid atomic opcode");
+ return 0;
+ }
+ if (!CheckHasSharedMemory()) return 0;
+ MemoryAccessImmediate<validate> imm(
+ this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
+ len += imm.length;
+ auto args = PopArgs(sig);
+ auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
+ CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm, result);
return len;
}
@@ -2823,8 +2972,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
V8_INLINE Value Pop(int index, ValueType expected) {
auto val = Pop();
- if (!VALIDATE(ValueTypes::IsSubType(expected, val.type) ||
- val.type == kWasmVar || expected == kWasmVar)) {
+ if (!VALIDATE(ValueTypes::IsSubType(val.type, expected) ||
+ val.type == kWasmBottom || expected == kWasmBottom)) {
this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
SafeOpcodeNameAt(this->pc_), index,
ValueTypes::TypeName(expected), SafeOpcodeNameAt(val.pc),
@@ -2849,11 +2998,26 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return val;
}
+ // Pops values from the stack as defined by {merge}, thereby type-checking an
+ // unreachable merge. Afterwards the values are pushed back onto the stack
+ // according to the signature in {merge}, so that follow-up validation is
+ // possible.
+ bool TypeCheckUnreachableMerge(Merge<Value>& merge, bool conditional_branch) {
+ int arity = merge.arity;
+ // For conditional branches, stack value '0' is the condition of the branch,
+ // and the result values start at index '1'.
+ int index_offset = conditional_branch ? 1 : 0;
+ for (int i = 0; i < arity; ++i) Pop(index_offset + i, merge[i].type);
+ // Push values of the correct type back on the stack.
+ for (int i = arity - 1; i >= 0; --i) Push(merge[i].type);
+ return this->ok();
+ }
+
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
void FallThruTo(Control* c) {
DCHECK_EQ(c, &control_.back());
- if (!TypeCheckFallThru(c)) return;
+ if (!TypeCheckFallThru()) return;
if (!c->reachable()) return;
if (!c->is_loop()) CALL_INTERFACE(FallThruTo, c);
@@ -2861,6 +3025,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TypeCheckMergeValues(Control* c, Merge<Value>* merge) {
+ // This is a CHECK instead of a DCHECK because {validate} is a constexpr,
+ // and a CHECK makes the whole function unreachable.
+ static_assert(validate, "Call this function only within VALIDATE");
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
DCHECK_GE(stack_.size(), c->stack_depth + merge->arity);
// The computation of {stack_values} is only valid if {merge->arity} is >0.
@@ -2870,108 +3037,121 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < merge->arity; ++i) {
Value& val = stack_values[i];
Value& old = (*merge)[i];
- if (ValueTypes::IsSubType(old.type, val.type)) continue;
- // If {val.type} is polymorphic, which results from unreachable, make
- // it more specific by using the merge value's expected type.
- // If it is not polymorphic, this is a type error.
- if (!VALIDATE(val.type == kWasmVar)) {
+ if (!ValueTypes::IsSubType(val.type, old.type)) {
this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
i, ValueTypes::TypeName(old.type),
ValueTypes::TypeName(val.type));
return false;
}
- val.type = old.type;
}
return true;
}
- bool TypeCheckFallThru(Control* c) {
- DCHECK_EQ(c, &control_.back());
- if (!validate) return true;
- uint32_t expected = c->end_merge.arity;
- DCHECK_GE(stack_.size(), c->stack_depth);
- uint32_t actual = static_cast<uint32_t>(stack_.size()) - c->stack_depth;
- // Fallthrus must match the arity of the control exactly.
- if (!InsertUnreachablesIfNecessary(expected, actual) || actual > expected) {
+ bool TypeCheckFallThru() {
+ Control& c = control_.back();
+ if (V8_LIKELY(c.reachable())) {
+ // We only do type-checking here. This is only needed during validation.
+ if (!validate) return true;
+
+ uint32_t expected = c.end_merge.arity;
+ DCHECK_GE(stack_.size(), c.stack_depth);
+ uint32_t actual = static_cast<uint32_t>(stack_.size()) - c.stack_depth;
+ // Fallthrus must match the arity of the control exactly.
+ if (actual != expected) {
+ this->errorf(
+ this->pc_,
+ "expected %u elements on the stack for fallthru to @%d, found %u",
+ expected, startrel(c.pc), actual);
+ return false;
+ }
+ if (expected == 0) return true; // Fast path.
+
+ return TypeCheckMergeValues(&c, &c.end_merge);
+ }
+
+ // Type-check an unreachable fallthru. First we do an arity check, then a
+ // type check. Note that type-checking may require an adjustment of the
+ // stack, if some stack values are missing to match the block signature.
+ Merge<Value>& merge = c.end_merge;
+ int arity = static_cast<int>(merge.arity);
+ int available = static_cast<int>(stack_.size()) - c.stack_depth;
+ // For fallthrus, no more values than needed may be on the stack.
+ if (available > arity) {
this->errorf(
this->pc_,
"expected %u elements on the stack for fallthru to @%d, found %u",
- expected, startrel(c->pc), actual);
+ arity, startrel(c.pc), available);
return false;
}
- if (expected == 0) return true; // Fast path.
-
- return TypeCheckMergeValues(c, &c->end_merge);
+ // Pop all values from the stack for type checking of existing stack
+ // values.
+ return TypeCheckUnreachableMerge(merge, false);
}
- bool TypeCheckBranch(Control* c) {
- // Branches must have at least the number of values expected; can have more.
- uint32_t expected = c->br_merge()->arity;
- if (expected == 0) return true; // Fast path.
- DCHECK_GE(stack_.size(), control_.back().stack_depth);
- uint32_t actual =
- static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
- if (!InsertUnreachablesIfNecessary(expected, actual)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for br to @%d, found %u",
- expected, startrel(c->pc), actual);
- return false;
+ enum TypeCheckBranchResult {
+ kReachableBranch,
+ kUnreachableBranch,
+ kInvalidStack,
+ };
+
+ TypeCheckBranchResult TypeCheckBranch(Control* c, bool conditional_branch) {
+ if (V8_LIKELY(control_.back().reachable())) {
+ // We only do type-checking here. This is only needed during validation.
+ if (!validate) return kReachableBranch;
+
+ // Branches must have at least the number of values expected; can have
+ // more.
+ uint32_t expected = c->br_merge()->arity;
+ if (expected == 0) return kReachableBranch; // Fast path.
+ DCHECK_GE(stack_.size(), control_.back().stack_depth);
+ uint32_t actual =
+ static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
+ if (expected > actual) {
+ this->errorf(
+ this->pc_,
+ "expected %u elements on the stack for br to @%d, found %u",
+ expected, startrel(c->pc), actual);
+ return kInvalidStack;
+ }
+ return TypeCheckMergeValues(c, c->br_merge()) ? kReachableBranch
+ : kInvalidStack;
}
- return TypeCheckMergeValues(c, c->br_merge());
+
+ return TypeCheckUnreachableMerge(*c->br_merge(), conditional_branch)
+ ? kUnreachableBranch
+ : kInvalidStack;
}
bool TypeCheckReturn() {
+ int num_returns = static_cast<int>(this->sig_->return_count());
+ // No type checking is needed if there are no returns.
+ if (num_returns == 0) return true;
+
// Returns must have at least the number of values expected; can have more.
- uint32_t num_returns = static_cast<uint32_t>(this->sig_->return_count());
- DCHECK_GE(stack_.size(), control_.back().stack_depth);
- uint32_t actual =
- static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
- if (!InsertUnreachablesIfNecessary(num_returns, actual)) {
+ int num_available =
+ static_cast<int>(stack_.size()) - control_.back().stack_depth;
+ if (num_available < num_returns) {
this->errorf(this->pc_,
"expected %u elements on the stack for return, found %u",
- num_returns, actual);
+ num_returns, num_available);
return false;
}
// Typecheck the topmost {num_returns} values on the stack.
- if (num_returns == 0) return true;
// This line requires num_returns > 0.
Value* stack_values = &*(stack_.end() - num_returns);
- for (uint32_t i = 0; i < num_returns; ++i) {
+ for (int i = 0; i < num_returns; ++i) {
auto& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
- if (ValueTypes::IsSubType(expected_type, val.type)) continue;
- // If {val.type} is polymorphic, which results from unreachable,
- // make it more specific by using the return's expected type.
- // If it is not polymorphic, this is a type error.
- if (!VALIDATE(val.type == kWasmVar)) {
+ if (!ValueTypes::IsSubType(val.type, expected_type)) {
this->errorf(this->pc_,
"type error in return[%u] (expected %s, got %s)", i,
ValueTypes::TypeName(expected_type),
ValueTypes::TypeName(val.type));
return false;
}
- val.type = expected_type;
- }
- return true;
- }
-
- inline bool InsertUnreachablesIfNecessary(uint32_t expected,
- uint32_t actual) {
- if (V8_LIKELY(actual >= expected)) {
- return true; // enough actual values are there.
- }
- if (!VALIDATE(control_.back().unreachable())) {
- // There aren't enough values on the stack.
- return false;
}
- // A slow path. When the actual number of values on the stack is less
- // than the expected number of values and the current control is
- // unreachable, insert unreachable values below the actual values.
- // This simplifies {TypeCheckMergeValues}.
- auto pos = stack_.begin() + (stack_.size() - actual);
- stack_.insert(pos, expected - actual, UnreachableValue(this->pc_));
return true;
}
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index c1e8e541b5..0568d61f3f 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -45,7 +45,7 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmFeatures& enabled,
const WasmModule* module, WasmFeatures* detected,
- FunctionBody& body) {
+ const FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(
&zone, module, enabled, detected, body);
@@ -151,7 +151,12 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
unsigned length =
WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, i.pc());
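+ // Number of opcode bytes to skip before printing raw immediate bytes;
+ // prefixed opcodes occupy two bytes.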
+ unsigned offset = 1;
WasmOpcode opcode = i.current();
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+ opcode = i.prefixed_opcode();
+ offset = 2;
+ }
if (line_numbers) line_numbers->push_back(i.position());
if (opcode == kExprElse || opcode == kExprCatch) {
control_depth--;
@@ -188,7 +193,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
#undef CASE_LOCAL_TYPE
} else {
- for (unsigned j = 1; j < length; ++j) {
+ for (unsigned j = offset; j < length; ++j) {
os << " 0x" << AsHex(i.pc()[j], 2) << ",";
}
}
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 16f90a41cb..eadc333dd5 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -38,7 +38,7 @@ V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmFeatures& enabled,
const WasmModule* module,
WasmFeatures* detected,
- FunctionBody& body);
+ const FunctionBody& body);
enum PrintLocals { kPrintLocals, kOmitLocals };
V8_EXPORT_PRIVATE
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index a5d7a08846..7df5abf5c8 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -4,9 +4,14 @@
#include "src/wasm/function-compiler.h"
+#include "src/codegen/compiler.h"
#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/diagnostics/code-tracer.h"
#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/wasm-code-manager.h"
@@ -107,12 +112,48 @@ ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier(
const WasmModule* module) {
// Liftoff does not support the special asm.js opcodes, thus always compile
// asm.js modules with TurboFan.
- if (module->origin == kAsmJsOrigin) return ExecutionTier::kTurbofan;
+ if (is_asmjs_module(module)) return ExecutionTier::kTurbofan;
if (FLAG_wasm_interpret_all) return ExecutionTier::kInterpreter;
return FLAG_liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
}
WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
+ WasmEngine* engine, CompilationEnv* env,
+ const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
+ Counters* counters, WasmFeatures* detected) {
+ WasmCompilationResult result;
+ if (func_index_ < static_cast<int>(env->module->num_imported_functions)) {
+ result = ExecuteImportWrapperCompilation(engine, env);
+ } else {
+ result = ExecuteFunctionCompilation(engine, env, wire_bytes_storage,
+ counters, detected);
+ }
+
+ if (result.succeeded()) {
+ counters->wasm_generated_code_size()->Increment(
+ result.code_desc.instr_size);
+ counters->wasm_reloc_size()->Increment(result.code_desc.reloc_size);
+ }
+
+ result.func_index = func_index_;
+ result.requested_tier = tier_;
+
+ return result;
+}
+
+WasmCompilationResult WasmCompilationUnit::ExecuteImportWrapperCompilation(
+ WasmEngine* engine, CompilationEnv* env) {
+ FunctionSig* sig = env->module->functions[func_index_].sig;
+ // Assume the wrapper is going to be a JS function with matching arity at
+ // instantiation time.
+ auto kind = compiler::kDefaultImportCallKind;
+ bool source_positions = is_asmjs_module(env->module);
+ WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
+ engine, env, kind, sig, source_positions);
+ return result;
+}
+
+WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
WasmEngine* wasm_engine, CompilationEnv* env,
const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
Counters* counters, WasmFeatures* detected) {
@@ -167,17 +208,32 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
break;
}
- result.func_index = func_index_;
- result.requested_tier = tier_;
+ return result;
+}
- if (result.succeeded()) {
- counters->wasm_generated_code_size()->Increment(
- result.code_desc.instr_size);
- counters->wasm_reloc_size()->Increment(result.code_desc.reloc_size);
- }
+namespace {
+bool must_record_function_compilation(Isolate* isolate) {
+ return isolate->logger()->is_listening_to_code_events() ||
+ isolate->is_profiling();
+}
- return result;
+PRINTF_FORMAT(3, 4)
+void RecordWasmHeapStubCompilation(Isolate* isolate, Handle<Code> code,
+ const char* format, ...) {
+ DCHECK(must_record_function_compilation(isolate));
+
+ ScopedVector<char> buffer(128);
+ va_list arguments;
+ va_start(arguments, format);
+ int len = VSNPrintF(buffer, format, arguments);
+ CHECK_LT(0, len);
+ va_end(arguments);
+ Handle<String> name_str =
+ isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ AbstractCode::cast(*code), *name_str));
}
+} // namespace
// static
void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
@@ -190,6 +246,8 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
wire_bytes.start() + function->code.offset(),
wire_bytes.start() + function->code.end_offset()};
+ DCHECK_LE(native_module->num_imported_functions(), function->func_index);
+ DCHECK_LT(function->func_index, native_module->num_functions());
WasmCompilationUnit unit(function->func_index, tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = unit.ExecuteCompilation(
@@ -204,6 +262,46 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
}
}
+JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(Isolate* isolate,
+ FunctionSig* sig,
+ bool is_import)
+ : job_(compiler::NewJSToWasmCompilationJob(isolate, sig, is_import)) {}
+
+JSToWasmWrapperCompilationUnit::~JSToWasmWrapperCompilationUnit() = default;
+
+void JSToWasmWrapperCompilationUnit::Prepare(Isolate* isolate) {
+ CompilationJob::Status status = job_->PrepareJob(isolate);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+}
+
+void JSToWasmWrapperCompilationUnit::Execute() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileJSToWasmWrapper");
+ DCHECK_EQ(job_->state(), CompilationJob::State::kReadyToExecute);
+ CompilationJob::Status status = job_->ExecuteJob();
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+}
+
+Handle<Code> JSToWasmWrapperCompilationUnit::Finalize(Isolate* isolate) {
+ CompilationJob::Status status = job_->FinalizeJob(isolate);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ Handle<Code> code = job_->compilation_info()->code();
+ if (must_record_function_compilation(isolate)) {
+ RecordWasmHeapStubCompilation(
+ isolate, code, "%s", job_->compilation_info()->GetDebugName().get());
+ }
+ return code;
+}
+
+// static
+Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ Isolate* isolate, FunctionSig* sig, bool is_import) {
+ // Run the compilation unit synchronously.
+ JSToWasmWrapperCompilationUnit unit(isolate, sig, is_import);
+ unit.Prepare(isolate);
+ unit.Execute();
+ return unit.Finalize(isolate);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
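
The hunk above splits ExecuteCompilation into a small dispatcher plus two specialized paths (import wrappers vs. ordinary function bodies), keeping the counter updates and the func_index/requested_tier bookkeeping in one place. A minimal standalone sketch of that shape, with simplified stand-in types rather than V8's real classes:

#include <cstdio>

struct Result {
  bool ok;
  int instr_size;
  int func_index;
};

class Unit {
 public:
  Unit(int func_index, int num_imported)
      : func_index_(func_index), num_imported_(num_imported) {}

  Result Execute() {
    // Dispatch to the specialized path first...
    Result result = func_index_ < num_imported_ ? CompileImportWrapper()
                                                : CompileFunction();
    // ...then apply the bookkeeping that is common to both paths.
    if (result.ok) std::printf("unit %d: %d bytes\n", func_index_, result.instr_size);
    result.func_index = func_index_;
    return result;
  }

 private:
  Result CompileImportWrapper() { return {true, 16, -1}; }
  Result CompileFunction() { return {true, 128, -1}; }

  int func_index_;
  int num_imported_;
};

int main() {
  Unit(0, /*num_imported=*/2).Execute();  // takes the import-wrapper path
  Unit(5, /*num_imported=*/2).Execute();  // takes the function path
}
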
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index e7d8ff9471..d0b47b91aa 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -18,6 +18,7 @@ namespace internal {
class AssemblerBuffer;
class Counters;
+class OptimizedCompilationJob;
namespace wasm {
@@ -34,6 +35,10 @@ class WasmInstructionBuffer final {
static std::unique_ptr<WasmInstructionBuffer> New();
+ // Override {operator delete} to avoid implicit instantiation of {operator
+ // delete} with {size_t} argument. The {size_t} argument would be incorrect.
+ void operator delete(void* ptr) { ::operator delete(ptr); }
+
private:
WasmInstructionBuffer() = delete;
DISALLOW_COPY_AND_ASSIGN(WasmInstructionBuffer);
@@ -43,6 +48,12 @@ struct WasmCompilationResult {
public:
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
+ enum Kind : int8_t {
+ kFunction,
+ kWasmToJsWrapper,
+ kInterpreterEntry,
+ };
+
bool succeeded() const { return code_desc.buffer != nullptr; }
bool failed() const { return !succeeded(); }
operator bool() const { return succeeded(); }
@@ -53,9 +64,10 @@ struct WasmCompilationResult {
uint32_t tagged_parameter_slots = 0;
OwnedVector<byte> source_positions;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions;
- int func_index;
+ int func_index = static_cast<int>(kAnonymousFuncIndex);
ExecutionTier requested_tier;
ExecutionTier result_tier;
+ Kind kind = kFunction;
};
class V8_EXPORT_PRIVATE WasmCompilationUnit final {
@@ -77,6 +89,14 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
ExecutionTier);
private:
+ WasmCompilationResult ExecuteFunctionCompilation(
+ WasmEngine* wasm_engine, CompilationEnv* env,
+ const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
+ Counters* counters, WasmFeatures* detected);
+
+ WasmCompilationResult ExecuteImportWrapperCompilation(WasmEngine* engine,
+ CompilationEnv* env);
+
int func_index_;
ExecutionTier tier_;
};
@@ -86,6 +106,24 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
ASSERT_TRIVIALLY_COPYABLE(WasmCompilationUnit);
STATIC_ASSERT(sizeof(WasmCompilationUnit) <= 2 * kSystemPointerSize);
+class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
+ public:
+ JSToWasmWrapperCompilationUnit(Isolate* isolate, FunctionSig* sig,
+ bool is_import);
+ ~JSToWasmWrapperCompilationUnit();
+
+ void Prepare(Isolate* isolate);
+ void Execute();
+ Handle<Code> Finalize(Isolate* isolate);
+
+ // Run a compilation unit synchronously.
+ static Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
+ bool is_import);
+
+ private:
+ std::unique_ptr<OptimizedCompilationJob> job_;
+};
+
} // namespace wasm
} // namespace internal
} // namespace v8
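
The new JSToWasmWrapperCompilationUnit exposes a three-phase job: Prepare and Finalize need the Isolate and therefore run on the main thread, while Execute is self-contained and can run on any worker. A small self-contained sketch of that pattern, including a synchronous driver like the static CompileJSToWasmWrapper above (names and types here are illustrative stand-ins):

#include <cassert>
#include <string>

class WrapperUnit {
 public:
  explicit WrapperUnit(std::string name) : name_(std::move(name)) {}

  void Prepare() { prepared_ = true; }                     // main thread only
  void Execute() { assert(prepared_); executed_ = true; }  // any thread
  std::string Finalize() {                                 // main thread only
    assert(executed_);
    return "code for " + name_;
  }

  // Convenience: run all three phases synchronously, mirroring the static
  // CompileJSToWasmWrapper helper above.
  static std::string CompileSync(std::string name) {
    WrapperUnit unit(std::move(name));
    unit.Prepare();
    unit.Execute();
    return unit.Finalize();
  }

 private:
  std::string name_;
  bool prepared_ = false;
  bool executed_ = false;
};

int main() { return WrapperUnit::CompileSync("sig(i32)->i32").empty() ? 1 : 0; }
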
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 90d8749f2c..8efac18787 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -291,14 +291,14 @@ class WasmGraphBuildingInterface {
BUILD(SetGlobal, imm.index, value.node);
}
- void GetTable(FullDecoder* decoder, const Value& index, Value* result,
+ void TableGet(FullDecoder* decoder, const Value& index, Value* result,
const TableIndexImmediate<validate>& imm) {
- result->node = BUILD(GetTable, imm.index, index.node, decoder->position());
+ result->node = BUILD(TableGet, imm.index, index.node, decoder->position());
}
- void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
+ void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
const TableIndexImmediate<validate>& imm) {
- BUILD(SetTable, imm.index, index.node, value.node, decoder->position());
+ BUILD(TableSet, imm.index, index.node, value.node, decoder->position());
}
void Unreachable(FullDecoder* decoder) {
@@ -532,6 +532,8 @@ class WasmGraphBuildingInterface {
if (result) result->node = node;
}
+ void AtomicFence(FullDecoder* decoder) { BUILD(AtomicFence); }
+
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
@@ -567,7 +569,7 @@ class WasmGraphBuildingInterface {
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
- BUILD(TableCopy, imm.table_src.index, imm.table_dst.index, args[0].node,
+ BUILD(TableCopy, imm.table_dst.index, imm.table_src.index, args[0].node,
args[1].node, args[2].node, decoder->position());
}
@@ -691,8 +693,8 @@ class WasmGraphBuildingInterface {
case kWasmS128:
return builder_->S128Zero();
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
return builder_->RefNull();
default:
UNREACHABLE();
@@ -717,7 +719,7 @@ class WasmGraphBuildingInterface {
Value& val = stack_values[i];
Value& old = (*merge)[i];
DCHECK_NOT_NULL(val.node);
- DCHECK(val.type == kWasmVar ||
+ DCHECK(val.type == kWasmBottom ||
ValueTypes::MachineRepresentationFor(val.type) ==
ValueTypes::MachineRepresentationFor(old.type));
old.node = first ? val.node
diff --git a/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h b/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
deleted file mode 100644
index ba2093d2c1..0000000000
--- a/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
-#define V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
-
-#include "src/compiler/wasm-compiler.h"
-#include "src/logging/counters.h"
-#include "src/wasm/value-type.h"
-#include "src/wasm/wasm-code-manager.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-class JSToWasmWrapperCache {
- public:
- Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
- bool is_import) {
- std::pair<bool, FunctionSig> key(is_import, *sig);
- Handle<Code>& cached = cache_[key];
- if (cached.is_null()) {
- cached = compiler::CompileJSToWasmWrapper(isolate, sig, is_import)
- .ToHandleChecked();
- }
- return cached;
- }
-
- private:
- // We generate different code for calling imports than calling wasm functions
- // in this module. Both are cached separately.
- using CacheKey = std::pair<bool, FunctionSig>;
- std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_;
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
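
The removed cache keyed wrappers on (is_import, signature) so that wrappers for imports and for module-local functions are cached separately; the same deduplication now happens in CompileJsToWasmWrappers via a keyed map of compilation units. A standalone sketch of a pair-keyed cache with a custom hash, using a string as a stand-in for FunctionSig:

#include <cstdio>
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>

using CacheKey = std::pair<bool, std::string>;  // (is_import, signature)

struct CacheKeyHash {
  size_t operator()(const CacheKey& key) const {
    // Trivial hash combiner, good enough for a sketch.
    return std::hash<bool>()(key.first) ^ (std::hash<std::string>()(key.second) << 1);
  }
};

std::string CompileWrapper(const CacheKey& key) {
  return (key.first ? "import:" : "local:") + key.second;
}

int main() {
  std::unordered_map<CacheKey, std::string, CacheKeyHash> cache;
  for (CacheKey key : {CacheKey{false, "i32->i32"}, CacheKey{false, "i32->i32"},
                       CacheKey{true, "i32->i32"}}) {
    std::string& cached = cache[key];
    if (cached.empty()) cached = CompileWrapper(key);  // compile only on a miss
    std::printf("%s\n", cached.c_str());
  }
}
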
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index 93ff8a9317..7c41c0a209 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -17,8 +17,8 @@ namespace wasm {
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
// Use a push, because mov to an extended register takes 6 bytes.
- pushq(Immediate(func_index)); // max 5 bytes
- EmitJumpSlot(lazy_compile_target); // always 5 bytes
+ pushq_imm32(func_index); // 5 bytes
+ EmitJumpSlot(lazy_compile_target); // 5 bytes
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
@@ -43,7 +43,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
mov(kWasmCompileLazyFuncIndexRegister, func_index); // 5 bytes
- jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
+ jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
@@ -97,13 +97,17 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_ARM64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
- Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index); // max. 2 instr
- Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
+ int start = pc_offset();
+ Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index); // 1-2 instr
+ Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK(nop_bytes == 0 || nop_bytes == kInstrSize);
+ if (nop_bytes) nop();
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
JumpToInstructionStream(builtin_target);
- CheckConstPool(true, false); // force emit of const pool
+ ForceConstantPoolEmissionWithoutJump();
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
@@ -154,10 +158,14 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
+ int start = pc_offset();
li(kWasmCompileLazyFuncIndexRegister, func_index); // max. 2 instr
// Jump produces max. 4 instructions for 32-bit platform
// and max. 6 instructions for 64-bit platform.
Jump(lazy_compile_target, RelocInfo::NONE);
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
@@ -179,12 +187,16 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
+ int start = pc_offset();
// Load function index to register. max 5 instrs
mov(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
// Jump to {lazy_compile_target}. max 5 instrs
mov(r0, Operand(lazy_compile_target));
mtctr(r0);
bctr();
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
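
The changed emitters above pad each lazy-compile slot with nops so that every slot occupies exactly kLazyCompileTableSlotSize bytes, which is what makes index-based offset computation possible. A standalone sketch of the emit-then-pad pattern over a plain byte buffer (constants and byte encodings below are illustrative only):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr int kSlotSize = 10;   // stand-in for kLazyCompileTableSlotSize
constexpr uint8_t kNop = 0x90;  // x64-style nop, used here only as an example

void EmitSlot(std::vector<uint8_t>* table, const std::vector<uint8_t>& code) {
  size_t start = table->size();
  table->insert(table->end(), code.begin(), code.end());
  size_t used = table->size() - start;
  assert(used <= kSlotSize);
  table->insert(table->end(), kSlotSize - used, kNop);  // pad to the slot size
}

int main() {
  std::vector<uint8_t> table;
  EmitSlot(&table, {0x68, 1, 0, 0, 0});        // e.g. a 5-byte push imm32
  EmitSlot(&table, {0x68, 2, 0, 0, 0, 0xe9});  // a slightly longer sequence
  std::printf("table size: %zu (2 slots of %d bytes)\n", table.size(), kSlotSize);
}
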
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index eef9fea167..379a547b55 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -17,7 +17,16 @@ namespace wasm {
// each slot containing a dispatch to the currently published {WasmCode} that
// corresponds to the function.
//
-// Note that the table is split into lines of fixed size, with lines laid out
+// Additionally to this main jump table, there exist special jump tables for
+// other purposes:
+// - the runtime stub table contains one entry per wasm runtime stub (see
+// {WasmCode::RuntimeStubId}), which jumps to the corresponding embedded
+// builtin.
+// - the lazy compile table contains one entry per wasm function which jumps to
+// the common {WasmCompileLazy} builtin and passes the function index that was
+// invoked.
+//
+// The main jump table is split into lines of fixed size, with lines laid out
// consecutively within the executable memory of the {NativeModule}. The slots
// in turn are consecutive within a line, but do not cross line boundaries.
//
@@ -27,6 +36,7 @@ namespace wasm {
//
// The above illustrates jump table lines {Li} containing slots {Si} with each
// line containing {n} slots and some padding {x} for alignment purposes.
+// Other jump tables are just consecutive.
class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
public:
// Translate an offset into the continuous jump table to a jump table index.
@@ -39,7 +49,7 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
}
// Translate a jump table index to an offset into the continuous jump table.
- static uint32_t SlotIndexToOffset(uint32_t slot_index) {
+ static uint32_t JumpSlotIndexToOffset(uint32_t slot_index) {
uint32_t line_index = slot_index / kJumpTableSlotsPerLine;
uint32_t line_offset =
(slot_index % kJumpTableSlotsPerLine) * kJumpTableSlotSize;
@@ -60,40 +70,56 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
return slot_index * kJumpTableStubSlotSize;
}
+ // Translate a slot index to an offset into the lazy compile table.
+ static uint32_t LazyCompileSlotIndexToOffset(uint32_t slot_index) {
+ return slot_index * kLazyCompileTableSlotSize;
+ }
+
// Determine the size of a jump table containing only runtime stub slots.
static constexpr uint32_t SizeForNumberOfStubSlots(uint32_t slot_count) {
return slot_count * kJumpTableStubSlotSize;
}
- static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
- uint32_t func_index,
- Address lazy_compile_target,
- WasmCode::FlushICache flush_i_cache) {
- Address slot = base + SlotIndexToOffset(slot_index);
- JumpTableAssembler jtasm(slot);
- jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
- jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
- if (flush_i_cache) {
- FlushInstructionCache(slot, kJumpTableSlotSize);
+ // Determine the size of a lazy compile table.
+ static constexpr uint32_t SizeForNumberOfLazyFunctions(uint32_t slot_count) {
+ return slot_count * kLazyCompileTableSlotSize;
+ }
+
+ static void GenerateLazyCompileTable(Address base, uint32_t num_slots,
+ uint32_t num_imported_functions,
+ Address wasm_compile_lazy_target) {
+ uint32_t lazy_compile_table_size = num_slots * kLazyCompileTableSlotSize;
+ // Assume enough space, so the Assembler does not try to grow the buffer.
+ JumpTableAssembler jtasm(base, lazy_compile_table_size + 256);
+ for (uint32_t slot_index = 0; slot_index < num_slots; ++slot_index) {
+ DCHECK_EQ(slot_index * kLazyCompileTableSlotSize, jtasm.pc_offset());
+ jtasm.EmitLazyCompileJumpSlot(slot_index + num_imported_functions,
+ wasm_compile_lazy_target);
}
+ DCHECK_EQ(lazy_compile_table_size, jtasm.pc_offset());
+ FlushInstructionCache(base, lazy_compile_table_size);
}
- static void EmitRuntimeStubSlot(Address base, uint32_t slot_index,
- Address builtin_target,
- WasmCode::FlushICache flush_i_cache) {
- Address slot = base + StubSlotIndexToOffset(slot_index);
- JumpTableAssembler jtasm(slot);
- jtasm.EmitRuntimeStubSlot(builtin_target);
- jtasm.NopBytes(kJumpTableStubSlotSize - jtasm.pc_offset());
- if (flush_i_cache) {
- FlushInstructionCache(slot, kJumpTableStubSlotSize);
+ static void GenerateRuntimeStubTable(Address base, Address* targets,
+ int num_stubs) {
+ uint32_t table_size = num_stubs * kJumpTableStubSlotSize;
+ // Assume enough space, so the Assembler does not try to grow the buffer.
+ JumpTableAssembler jtasm(base, table_size + 256);
+ int offset = 0;
+ for (int index = 0; index < num_stubs; ++index) {
+ DCHECK_EQ(offset, StubSlotIndexToOffset(index));
+ DCHECK_EQ(offset, jtasm.pc_offset());
+ jtasm.EmitRuntimeStubSlot(targets[index]);
+ offset += kJumpTableStubSlotSize;
+ jtasm.NopBytes(offset - jtasm.pc_offset());
}
+ FlushInstructionCache(base, table_size);
}
static void PatchJumpTableSlot(Address base, uint32_t slot_index,
Address new_target,
WasmCode::FlushICache flush_i_cache) {
- Address slot = base + SlotIndexToOffset(slot_index);
+ Address slot = base + JumpSlotIndexToOffset(slot_index);
JumpTableAssembler jtasm(slot);
jtasm.EmitJumpSlot(new_target);
jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
@@ -115,44 +141,54 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
// boundaries. The jump table line size has been chosen to satisfy this.
#if V8_TARGET_ARCH_X64
static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 10;
+ static constexpr int kJumpTableSlotSize = 5;
+ static constexpr int kLazyCompileTableSlotSize = 10;
static constexpr int kJumpTableStubSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 10;
+ static constexpr int kJumpTableSlotSize = 5;
+ static constexpr int kLazyCompileTableSlotSize = 10;
static constexpr int kJumpTableStubSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
- static constexpr int kJumpTableLineSize = 5 * kInstrSize;
- static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
-#elif V8_TARGET_ARCH_ARM64
static constexpr int kJumpTableLineSize = 3 * kInstrSize;
static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 5 * kInstrSize;
+ static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
+#elif V8_TARGET_ARCH_ARM64
+ static constexpr int kJumpTableLineSize = 1 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 1 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 3 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#elif V8_TARGET_ARCH_S390X
static constexpr int kJumpTableLineSize = 128;
- static constexpr int kJumpTableSlotSize = 20;
+ static constexpr int kJumpTableSlotSize = 14;
+ static constexpr int kLazyCompileTableSlotSize = 20;
static constexpr int kJumpTableStubSlotSize = 14;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 48;
+ static constexpr int kJumpTableSlotSize = 7 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 12 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 7 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS
static constexpr int kJumpTableLineSize = 6 * kInstrSize;
- static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 4 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 6 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 4 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kJumpTableLineSize = 8 * kInstrSize;
- static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#else
static constexpr int kJumpTableLineSize = 1;
static constexpr int kJumpTableSlotSize = 1;
+ static constexpr int kLazyCompileTableSlotSize = 1;
static constexpr int kJumpTableStubSlotSize = 1;
#endif
static constexpr int kJumpTableSlotsPerLine =
kJumpTableLineSize / kJumpTableSlotSize;
+ STATIC_ASSERT(kJumpTableSlotsPerLine >= 1);
// {JumpTableAssembler} is never used during snapshot generation, and its code
// must be independent of the code range of any isolate anyway. Just ensure
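
To summarize the layout encoded by these constants: main jump-table slots are grouped into fixed-size lines so that a slot never crosses a line boundary, while the lazy-compile and runtime-stub tables are laid out consecutively. A standalone sketch of the index-to-offset arithmetic, with made-up constants rather than the per-architecture values above:

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uint32_t kLineSize = 64;
constexpr uint32_t kSlotSize = 5;
constexpr uint32_t kSlotsPerLine = kLineSize / kSlotSize;  // 12 slots, rest is padding

uint32_t JumpSlotIndexToOffset(uint32_t slot_index) {
  uint32_t line_index = slot_index / kSlotsPerLine;
  uint32_t line_offset = (slot_index % kSlotsPerLine) * kSlotSize;
  return line_index * kLineSize + line_offset;
}

uint32_t LazyCompileSlotIndexToOffset(uint32_t slot_index) {
  constexpr uint32_t kLazySlotSize = 10;
  return slot_index * kLazySlotSize;  // no line structure, just consecutive slots
}

int main() {
  assert(JumpSlotIndexToOffset(12) == 64);  // first slot of the second line
  std::printf("slot 13 -> offset %u, lazy slot 3 -> offset %u\n",
              JumpSlotIndexToOffset(13), LazyCompileSlotIndexToOffset(3));
}
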
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 10483cf8ea..b11a557195 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -6,7 +6,7 @@
#include <cinttypes>
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
@@ -22,9 +22,9 @@ void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info,
#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
case MachineRepresentation::rep: \
SNPrintF(value, str ":" format, \
- ReadLittleEndianValue<ctype1>( \
+ base::ReadLittleEndianValue<ctype1>( \
reinterpret_cast<Address>(mem_start) + info->address), \
- ReadLittleEndianValue<ctype2>( \
+ base::ReadLittleEndianValue<ctype2>( \
reinterpret_cast<Address>(mem_start) + info->address)); \
break;
TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
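
The tracing code now uses base::ReadLittleEndianValue to load a typed value from a raw memory address. A stand-in sketch showing the memcpy-based load for the little-endian host case (the real helper also handles big-endian hosts):

#include <cstdint>
#include <cstdio>
#include <cstring>

template <typename T>
T ReadLittleEndianValue(const uint8_t* p) {
  T value;
  std::memcpy(&value, p, sizeof(T));  // memcpy avoids unaligned-access UB
  return value;
}

int main() {
  const uint8_t mem[] = {0x78, 0x56, 0x34, 0x12};
  // Prints 0x12345678 on little-endian hosts.
  std::printf("0x%08x\n", static_cast<unsigned>(ReadLittleEndianValue<uint32_t>(mem)));
}
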
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 3bb6eb1e58..b5a58d4f27 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -24,7 +24,6 @@
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/identity-map.h"
-#include "src/wasm/js-to-wasm-wrapper-cache.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
@@ -34,6 +33,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
@@ -152,6 +152,9 @@ class CompilationUnitQueues {
for (int task_id = 0; task_id < max_tasks; ++task_id) {
queues_[task_id].next_steal_task_id = next_task_id(task_id);
}
+ for (auto& atomic_counter : num_units_) {
+ std::atomic_init(&atomic_counter, size_t{0});
+ }
}
base::Optional<WasmCompilationUnit> GetNextUnit(
@@ -254,15 +257,14 @@ class CompilationUnitQueues {
};
struct BigUnitsQueue {
- BigUnitsQueue() = default;
+ BigUnitsQueue() {
+ for (auto& atomic : has_units) std::atomic_init(&atomic, false);
+ }
base::Mutex mutex;
// Can be read concurrently to check whether any elements are in the queue.
- std::atomic_bool has_units[kNumTiers] = {
- ATOMIC_VAR_INIT(false),
- ATOMIC_VAR_INIT(false)
- };
+ std::atomic<bool> has_units[kNumTiers];
// Protected by {mutex}:
std::priority_queue<BigUnit> units[kNumTiers];
@@ -271,11 +273,8 @@ class CompilationUnitQueues {
std::vector<Queue> queues_;
BigUnitsQueue big_units_queue_;
- std::atomic_size_t num_units_[kNumTiers] = {
- ATOMIC_VAR_INIT(0),
- ATOMIC_VAR_INIT(0)
- };
- std::atomic_int next_queue_to_add{0};
+ std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<int> next_queue_to_add{0};
int next_task_id(int task_id) const {
int next = task_id + 1;
@@ -382,7 +381,7 @@ class CompilationStateImpl {
// Initialize compilation progress. Set compilation tiers to expect for
// baseline and top tier compilation. Must be set before {AddCompilationUnits}
// is invoked which triggers background compilation.
- void InitializeCompilationProgress(bool lazy_module);
+ void InitializeCompilationProgress(bool lazy_module, int num_import_wrappers);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all
@@ -411,13 +410,11 @@ class CompilationStateImpl {
bool baseline_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
- return outstanding_baseline_functions_ == 0;
+ return outstanding_baseline_units_ == 0;
}
bool top_tier_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
return outstanding_top_tier_functions_ == 0;
}
@@ -473,7 +470,7 @@ class CompilationStateImpl {
// Compilation error, atomically updated. This flag can be updated and read
// using relaxed semantics.
- std::atomic_bool compile_failed_{false};
+ std::atomic<bool> compile_failed_{false};
const int max_background_tasks_ = 0;
@@ -519,7 +516,7 @@ class CompilationStateImpl {
// Callback functions to be called on compilation events.
std::vector<CompilationState::callback_t> callbacks_;
- int outstanding_baseline_functions_ = 0;
+ int outstanding_baseline_units_ = 0;
int outstanding_top_tier_functions_ = 0;
std::vector<uint8_t> compilation_progress_;
@@ -701,6 +698,10 @@ class CompilationUnitBuilder {
native_module->module())) {}
void AddUnits(uint32_t func_index) {
+ if (func_index < native_module_->module()->num_imported_functions) {
+ baseline_units_.emplace_back(func_index, ExecutionTier::kNone);
+ return;
+ }
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module_->module(), compilation_state()->compile_mode(),
native_module_->enabled_features(), func_index);
@@ -823,7 +824,7 @@ void ValidateSequentially(
bool IsLazyModule(const WasmModule* module) {
return FLAG_wasm_lazy_compilation ||
- (FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
+ (FLAG_asm_wasm_lazy_compilation && is_asmjs_module(module));
}
} // namespace
@@ -848,6 +849,8 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
ExecutionTierPair tiers = GetRequestedExecutionTiers(
module, compilation_state->compile_mode(), enabled_features, func_index);
+ DCHECK_LE(native_module->num_imported_functions(), func_index);
+ DCHECK_LT(func_index, native_module->num_functions());
WasmCompilationUnit baseline_unit(func_index, tiers.baseline_tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = baseline_unit.ExecuteCompilation(
@@ -857,7 +860,7 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
// During lazy compilation, we can only get compilation errors when
// {--wasm-lazy-validation} is enabled. Otherwise, the module was fully
// verified before starting its execution.
- DCHECK_IMPLIES(result.failed(), FLAG_wasm_lazy_validation);
+ CHECK_IMPLIES(result.failed(), FLAG_wasm_lazy_validation);
const WasmFunction* func = &module->functions[func_index];
if (result.failed()) {
ErrorThrower thrower(isolate, nullptr);
@@ -972,6 +975,29 @@ bool ExecuteCompilationUnits(
std::vector<WasmCode*> code_vector =
compile_scope->native_module()->AddCompiledCode(
VectorOf(results_to_publish));
+
+ // For import wrapper compilation units, add result to the cache.
+ const NativeModule* native_module = compile_scope->native_module();
+ int num_imported_functions = native_module->num_imported_functions();
+ DCHECK_EQ(code_vector.size(), results_to_publish.size());
+ WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
+ for (WasmCode* code : code_vector) {
+ int func_index = code->index();
+ DCHECK_LE(0, func_index);
+ DCHECK_LT(func_index, native_module->num_functions());
+ if (func_index < num_imported_functions) {
+ FunctionSig* sig = native_module->module()->functions[func_index].sig;
+ WasmImportWrapperCache::CacheKey key(compiler::kDefaultImportCallKind,
+ sig);
+ // If two imported functions have the same key, only one of them should
+ // have been added as a compilation unit. So it is always the first time
+ // we compile a wrapper for this key here.
+ DCHECK_NULL((*cache)[key]);
+ (*cache)[key] = code;
+ code->IncRef();
+ }
+ }
+
compile_scope->compilation_state()->OnFinishedUnits(VectorOf(code_vector));
results_to_publish.clear();
};
@@ -1023,15 +1049,39 @@ bool ExecuteCompilationUnits(
return true;
}
+// Returns the number of units added.
+int AddImportWrapperUnits(NativeModule* native_module,
+ CompilationUnitBuilder* builder) {
+ std::unordered_set<WasmImportWrapperCache::CacheKey,
+ WasmImportWrapperCache::CacheKeyHash>
+ keys;
+ int num_imported_functions = native_module->num_imported_functions();
+ for (int func_index = 0; func_index < num_imported_functions; func_index++) {
+ FunctionSig* sig = native_module->module()->functions[func_index].sig;
+ bool has_bigint_feature = native_module->enabled_features().bigint;
+ if (!IsJSCompatibleSignature(sig, has_bigint_feature)) {
+ continue;
+ }
+ WasmImportWrapperCache::CacheKey key(compiler::kDefaultImportCallKind, sig);
+ auto it = keys.insert(key);
+ if (it.second) {
+ // Ensure that all keys exist in the cache, so that we can populate the
+ // cache later without locking.
+ (*native_module->import_wrapper_cache())[key] = nullptr;
+ builder->AddUnits(func_index);
+ }
+ }
+ return static_cast<int>(keys.size());
+}
+
void InitializeCompilationUnits(NativeModule* native_module) {
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
const bool lazy_module = IsLazyModule(native_module->module());
- compilation_state->InitializeCompilationProgress(lazy_module);
-
ModuleWireBytes wire_bytes(native_module->wire_bytes());
CompilationUnitBuilder builder(native_module);
auto* module = native_module->module();
+
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) {
@@ -1047,6 +1097,9 @@ void InitializeCompilationUnits(NativeModule* native_module) {
builder.AddUnits(func_index);
}
}
+ int num_import_wrappers = AddImportWrapperUnits(native_module, &builder);
+ compilation_state->InitializeCompilationProgress(lazy_module,
+ num_import_wrappers);
builder.Commit();
}
@@ -1111,9 +1164,12 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
NativeModule* native_module) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
const bool lazy_module = IsLazyModule(wasm_module);
- if (!FLAG_wasm_lazy_validation &&
+ if (!FLAG_wasm_lazy_validation && wasm_module->origin == kWasmOrigin &&
MayCompriseLazyFunctions(wasm_module, native_module->enabled_features(),
lazy_module)) {
+ // Validate wasm modules for lazy compilation if requested. Never validate
+ // asm.js modules as these are valid by construction (additionally a CHECK
+ // will catch this during lazy compilation).
ValidateSequentially(wasm_module, native_module, isolate->counters(),
isolate->allocator(), thrower, lazy_module,
kOnlyLazyFunctions);
@@ -1256,6 +1312,7 @@ AsyncCompileJob::AsyncCompileJob(
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
resolver_(std::move(resolver)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "new AsyncCompileJob");
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
@@ -1386,6 +1443,8 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
// This function assumes that it is executed in a HandleScope, and that a
// context is set on the isolate.
void AsyncCompileJob::FinishCompile() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "AsyncCompileJob::FinishCompile");
bool is_after_deserialization = !module_object_.is_null();
if (!is_after_deserialization) {
PrepareRuntimeObjects();
@@ -1865,7 +1924,7 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
if (section_code == SectionCode::kUnknownSectionCode) {
Decoder decoder(bytes, offset);
section_code = ModuleDecoder::IdentifyUnknownSection(
- decoder, bytes.begin() + bytes.length());
+ &decoder, bytes.begin() + bytes.length());
if (section_code == SectionCode::kUnknownSectionCode) {
// Skip unknown sections that we do not know how to handle.
return true;
@@ -1902,13 +1961,19 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
DCHECK_EQ(job_->native_module_->module()->origin, kWasmOrigin);
const bool lazy_module = job_->wasm_lazy_compilation_;
- compilation_state->InitializeCompilationProgress(lazy_module);
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_.store(2);
compilation_unit_builder_.reset(
new CompilationUnitBuilder(job_->native_module_.get()));
+
+ NativeModule* native_module = job_->native_module_.get();
+
+ int num_import_wrappers =
+ AddImportWrapperUnits(native_module, compilation_unit_builder_.get());
+ compilation_state->InitializeCompilationProgress(lazy_module,
+ num_import_wrappers);
return true;
}
@@ -2079,16 +2144,16 @@ void CompilationStateImpl::AbortCompilation() {
callbacks_.clear();
}
-void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module) {
+void CompilationStateImpl::InitializeCompilationProgress(
+ bool lazy_module, int num_import_wrappers) {
DCHECK(!failed());
auto enabled_features = native_module_->enabled_features();
auto* module = native_module_->module();
base::MutexGuard guard(&callbacks_mutex_);
- DCHECK_EQ(0, outstanding_baseline_functions_);
+ DCHECK_EQ(0, outstanding_baseline_units_);
DCHECK_EQ(0, outstanding_top_tier_functions_);
compilation_progress_.reserve(module->num_declared_functions);
-
int start = module->num_imported_functions;
int end = start + module->num_declared_functions;
for (int func_index = start; func_index < end; func_index++) {
@@ -2104,7 +2169,7 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module) {
strategy == CompileStrategy::kLazyBaselineEagerTopTier);
// Count functions to complete baseline and top tier compilation.
- if (required_for_baseline) outstanding_baseline_functions_++;
+ if (required_for_baseline) outstanding_baseline_units_++;
if (required_for_top_tier) outstanding_top_tier_functions_++;
// Initialize function's compilation progress.
@@ -2120,24 +2185,25 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module) {
RequiredTopTierField::update(function_progress, required_top_tier);
compilation_progress_.push_back(function_progress);
}
- DCHECK_IMPLIES(lazy_module, outstanding_baseline_functions_ == 0);
+ DCHECK_IMPLIES(lazy_module, outstanding_baseline_units_ == 0);
DCHECK_IMPLIES(lazy_module, outstanding_top_tier_functions_ == 0);
- DCHECK_LE(0, outstanding_baseline_functions_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
+ DCHECK_LE(0, outstanding_baseline_units_);
+ DCHECK_LE(outstanding_baseline_units_, outstanding_top_tier_functions_);
+ outstanding_baseline_units_ += num_import_wrappers;
// Trigger callbacks if module needs no baseline or top tier compilation. This
// can be the case for an empty or fully lazy module.
- if (outstanding_baseline_functions_ == 0) {
+ if (outstanding_baseline_units_ == 0) {
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
- }
- if (outstanding_top_tier_functions_ == 0) {
- for (auto& callback : callbacks_) {
- callback(CompilationEvent::kFinishedTopTierCompilation);
+ if (outstanding_top_tier_functions_ == 0) {
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ // Clear the callbacks because no more events will be delivered.
+ callbacks_.clear();
}
- // Clear the callbacks because no more events will be delivered.
- callbacks_.clear();
}
}
@@ -2170,10 +2236,10 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
base::MutexGuard guard(&callbacks_mutex_);
- // In case of no outstanding functions we can return early.
+ // In case of no outstanding compilation units we can return early.
// This is especially important for lazy modules that were deserialized.
// Compilation progress was not set up in these cases.
- if (outstanding_baseline_functions_ == 0 &&
+ if (outstanding_baseline_units_ == 0 &&
outstanding_top_tier_functions_ == 0) {
return;
}
@@ -2190,49 +2256,61 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
for (WasmCode* code : code_vector) {
DCHECK_NOT_NULL(code);
- DCHECK_NE(code->tier(), ExecutionTier::kNone);
- native_module_->engine()->LogCode(code);
-
- // Read function's compilation progress.
- // This view on the compilation progress may differ from the actually
- // compiled code. Any lazily compiled function does not contribute to the
- // compilation progress but may publish code to the code manager.
- int slot_index =
- code->index() - native_module_->module()->num_imported_functions;
- uint8_t function_progress = compilation_progress_[slot_index];
- ExecutionTier required_baseline_tier =
- RequiredBaselineTierField::decode(function_progress);
- ExecutionTier required_top_tier =
- RequiredTopTierField::decode(function_progress);
- ExecutionTier reached_tier = ReachedTierField::decode(function_progress);
+ DCHECK_LT(code->index(), native_module_->num_functions());
bool completes_baseline_compilation = false;
bool completes_top_tier_compilation = false;
- // Check whether required baseline or top tier are reached.
- if (reached_tier < required_baseline_tier &&
- required_baseline_tier <= code->tier()) {
- DCHECK_GT(outstanding_baseline_functions_, 0);
- outstanding_baseline_functions_--;
- if (outstanding_baseline_functions_ == 0) {
+ if (code->index() < native_module_->num_imported_functions()) {
+ // Import wrapper.
+ DCHECK_EQ(code->tier(), ExecutionTier::kTurbofan);
+ outstanding_baseline_units_--;
+ if (outstanding_baseline_units_ == 0) {
completes_baseline_compilation = true;
}
- }
- if (reached_tier < required_top_tier && required_top_tier <= code->tier()) {
- DCHECK_GT(outstanding_top_tier_functions_, 0);
- outstanding_top_tier_functions_--;
- if (outstanding_top_tier_functions_ == 0) {
- completes_top_tier_compilation = true;
+ } else {
+ // Function.
+ DCHECK_NE(code->tier(), ExecutionTier::kNone);
+ native_module_->engine()->LogCode(code);
+
+ // Read function's compilation progress.
+ // This view on the compilation progress may differ from the actually
+ // compiled code. Any lazily compiled function does not contribute to the
+ // compilation progress but may publish code to the code manager.
+ int slot_index =
+ code->index() - native_module_->module()->num_imported_functions;
+ uint8_t function_progress = compilation_progress_[slot_index];
+ ExecutionTier required_baseline_tier =
+ RequiredBaselineTierField::decode(function_progress);
+ ExecutionTier required_top_tier =
+ RequiredTopTierField::decode(function_progress);
+ ExecutionTier reached_tier = ReachedTierField::decode(function_progress);
+
+ // Check whether required baseline or top tier are reached.
+ if (reached_tier < required_baseline_tier &&
+ required_baseline_tier <= code->tier()) {
+ DCHECK_GT(outstanding_baseline_units_, 0);
+ outstanding_baseline_units_--;
+ if (outstanding_baseline_units_ == 0) {
+ completes_baseline_compilation = true;
+ }
+ }
+ if (reached_tier < required_top_tier &&
+ required_top_tier <= code->tier()) {
+ DCHECK_GT(outstanding_top_tier_functions_, 0);
+ outstanding_top_tier_functions_--;
+ if (outstanding_top_tier_functions_ == 0) {
+ completes_top_tier_compilation = true;
+ }
}
- }
- // Update function's compilation progress.
- if (code->tier() > reached_tier) {
- compilation_progress_[slot_index] = ReachedTierField::update(
- compilation_progress_[slot_index], code->tier());
+ // Update function's compilation progress.
+ if (code->tier() > reached_tier) {
+ compilation_progress_[slot_index] = ReachedTierField::update(
+ compilation_progress_[slot_index], code->tier());
+ }
+ DCHECK_LE(0, outstanding_baseline_units_);
}
- DCHECK_LE(0, outstanding_baseline_functions_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
// Trigger callbacks.
if (completes_baseline_compilation) {
@@ -2240,8 +2318,11 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
+ if (outstanding_top_tier_functions_ == 0) {
+ completes_top_tier_compilation = true;
+ }
}
- if (completes_top_tier_compilation) {
+ if (outstanding_baseline_units_ == 0 && completes_top_tier_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedTopTierCompilation);
@@ -2335,24 +2416,83 @@ void CompilationStateImpl::SetError() {
callbacks_.clear();
}
+namespace {
+using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
+using JSToWasmWrapperQueue =
+ WrapperQueue<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>>;
+using JSToWasmWrapperUnitMap =
+ std::unordered_map<JSToWasmWrapperKey,
+ std::unique_ptr<JSToWasmWrapperCompilationUnit>,
+ base::hash<JSToWasmWrapperKey>>;
+
+class CompileJSToWasmWrapperTask final : public CancelableTask {
+ public:
+ CompileJSToWasmWrapperTask(CancelableTaskManager* task_manager,
+ JSToWasmWrapperQueue* queue,
+ JSToWasmWrapperUnitMap* compilation_units)
+ : CancelableTask(task_manager),
+ queue_(queue),
+ compilation_units_(compilation_units) {}
+
+ void RunInternal() override {
+ while (base::Optional<JSToWasmWrapperKey> key = queue_->pop()) {
+ JSToWasmWrapperCompilationUnit* unit = (*compilation_units_)[*key].get();
+ unit->Execute();
+ }
+ }
+
+ private:
+ JSToWasmWrapperQueue* const queue_;
+ JSToWasmWrapperUnitMap* const compilation_units_;
+};
+} // namespace
+
void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers) {
- JSToWasmWrapperCache js_to_wasm_cache;
+ JSToWasmWrapperQueue queue;
+ JSToWasmWrapperUnitMap compilation_units;
- // TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
- // optimization we keep the code space unlocked to avoid repeated unlocking
- // because many such wrapper are allocated in sequence below.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ // Prepare compilation units in the main thread.
for (auto exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
auto& function = module->functions[exp.index];
- Handle<Code> wrapper_code = js_to_wasm_cache.GetOrCompileJSToWasmWrapper(
- isolate, function.sig, function.imported);
- int wrapper_index =
- GetExportWrapperIndex(module, function.sig, function.imported);
+ JSToWasmWrapperKey key(function.imported, *function.sig);
+ if (queue.insert(key)) {
+ auto unit = base::make_unique<JSToWasmWrapperCompilationUnit>(
+ isolate, function.sig, function.imported);
+ unit->Prepare(isolate);
+ compilation_units.emplace(key, std::move(unit));
+ }
+ }
+
+ // Execute compilation jobs in the background.
+ CancelableTaskManager task_manager;
+ const int max_background_tasks = GetMaxBackgroundTasks();
+ for (int i = 0; i < max_background_tasks; ++i) {
+ auto task = base::make_unique<CompileJSToWasmWrapperTask>(
+ &task_manager, &queue, &compilation_units);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+ }
+
+ // Work in the main thread too.
+ while (base::Optional<JSToWasmWrapperKey> key = queue.pop()) {
+ JSToWasmWrapperCompilationUnit* unit = compilation_units[*key].get();
+ unit->Execute();
+ }
+ task_manager.CancelAndWait();
- export_wrappers->set(wrapper_index, *wrapper_code);
- RecordStats(*wrapper_code, isolate->counters());
+ // Finalize compilation jobs in the main thread.
+ // TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
+ // optimization we keep the code space unlocked to avoid repeated unlocking
+ // because many such wrappers are allocated in sequence below.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ for (auto& pair : compilation_units) {
+ JSToWasmWrapperKey key = pair.first;
+ JSToWasmWrapperCompilationUnit* unit = pair.second.get();
+ Handle<Code> code = unit->Finalize(isolate);
+ int wrapper_index = GetExportWrapperIndex(module, &key.second, key.first);
+ export_wrappers->set(wrapper_index, *code);
+ RecordStats(*code, isolate->counters());
}
}
@@ -2365,17 +2505,24 @@ WasmCode* CompileImportWrapper(
// yet.
WasmImportWrapperCache::CacheKey key(kind, sig);
DCHECK_NULL((*cache_scope)[key]);
- bool source_positions = native_module->module()->origin == kAsmJsOrigin;
+ bool source_positions = is_asmjs_module(native_module->module());
// Keep the {WasmCode} alive until we explicitly call {IncRef}.
WasmCodeRefScope code_ref_scope;
- WasmCode* wasm_code = compiler::CompileWasmImportCallWrapper(
- wasm_engine, native_module, kind, sig, source_positions);
- (*cache_scope)[key] = wasm_code;
- wasm_code->IncRef();
+ CompilationEnv env = native_module->CreateCompilationEnv();
+ WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
+ wasm_engine, &env, kind, sig, source_positions);
+ std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
+ std::move(result.source_positions), GetCodeKind(result),
+ ExecutionTier::kNone);
+ WasmCode* published_code = native_module->PublishCode(std::move(wasm_code));
+ (*cache_scope)[key] = published_code;
+ published_code->IncRef();
counters->wasm_generated_code_size()->Increment(
- wasm_code->instructions().length());
- counters->wasm_reloc_size()->Increment(wasm_code->reloc_info().length());
- return wasm_code;
+ published_code->instructions().length());
+ counters->wasm_reloc_size()->Increment(published_code->reloc_info().length());
+ return published_code;
}
Handle<Script> CreateWasmScript(Isolate* isolate,
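
Taken together, the module-compiler changes above turn export-wrapper compilation into a three-stage pipeline: deduplicate and Prepare units on the main thread, let background tasks and the main thread drain a shared key queue and Execute units, then Finalize everything back on the main thread. A standalone sketch of that flow with stand-in types (Unit, KeyQueue) rather than V8's classes:

#include <cstdio>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <thread>
#include <unordered_map>
#include <unordered_set>

struct Unit {
  std::string key;
  std::string code;
  void Execute() { code = "wrapper(" + key + ")"; }  // touches only its own data
};

class KeyQueue {
 public:
  // Single-threaded setup phase, so no lock needed here.
  bool insert(const std::string& key) { return set_.insert(key).second; }
  // Drained concurrently by workers and the main thread.
  std::optional<std::string> pop() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (set_.empty()) return std::nullopt;
    std::string key = *set_.begin();
    set_.erase(set_.begin());
    return key;
  }

 private:
  std::mutex mutex_;
  std::unordered_set<std::string> set_;
};

int main() {
  KeyQueue queue;
  std::unordered_map<std::string, std::unique_ptr<Unit>> units;
  // Stage 1: deduplicate by key and prepare units on the main thread.
  for (std::string sig : {"i32->i32", "f64->f64", "i32->i32"}) {
    if (queue.insert(sig)) units.emplace(sig, std::unique_ptr<Unit>(new Unit{sig, ""}));
  }
  // Stage 2: execute units on a worker and on the main thread.
  auto drain = [&] {
    while (std::optional<std::string> key = queue.pop()) units.at(*key)->Execute();
  };
  std::thread worker(drain);
  drain();
  worker.join();
  // Stage 3: "finalize" every unit back on the main thread.
  for (auto& pair : units) std::printf("%s\n", pair.second->code.c_str());
}
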
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index d465d6a322..27c7bff868 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -9,6 +9,7 @@
#include <functional>
#include <memory>
+#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/tasks/cancelable-task.h"
#include "src/wasm/compilation-environment.h"
@@ -67,6 +68,33 @@ bool CompileLazy(Isolate*, NativeModule*, int func_index);
int GetMaxBackgroundTasks();
+template <typename Key, typename Hash>
+class WrapperQueue {
+ public:
+ // Removes an arbitrary key from the queue and returns it.
+ // If the queue is empty, returns nullopt.
+ // Thread-safe.
+ base::Optional<Key> pop() {
+ base::Optional<Key> key = base::nullopt;
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ auto it = queue_.begin();
+ if (it != queue_.end()) {
+ key = *it;
+ queue_.erase(it);
+ }
+ return key;
+ }
+
+ // Adds the given key to the queue and returns true iff the insert was
+ // successful.
+ // Not thread-safe.
+ bool insert(const Key& key) { return queue_.insert(key).second; }
+
+ private:
+ base::Mutex mutex_;
+ std::unordered_set<Key, Hash> queue_;
+};
+
// Encapsulates all the state and steps of an asynchronous compilation.
// An asynchronous compile job consists of a number of tasks that are executed
// as foreground and background tasks. Any phase that touches the V8 heap or
@@ -91,6 +119,8 @@ class AsyncCompileJob {
Isolate* isolate() const { return isolate_; }
+ Handle<Context> context() const { return native_context_; }
+
private:
class CompileTask;
class CompileStep;
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 4201b1e76c..56712977b1 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -123,7 +123,7 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
case WasmInitExpr::kRefNullConst:
return kWasmNullRef;
case WasmInitExpr::kRefFuncConst:
- return kWasmAnyFunc;
+ return kWasmFuncRef;
default:
UNREACHABLE();
}
@@ -131,35 +131,35 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
// Reads a length-prefixed string, checking that it is within bounds. Returns
// the offset of the string, and the length as an out parameter.
-WireBytesRef consume_string(Decoder& decoder, bool validate_utf8,
+WireBytesRef consume_string(Decoder* decoder, bool validate_utf8,
const char* name) {
- uint32_t length = decoder.consume_u32v("string length");
- uint32_t offset = decoder.pc_offset();
- const byte* string_start = decoder.pc();
+ uint32_t length = decoder->consume_u32v("string length");
+ uint32_t offset = decoder->pc_offset();
+ const byte* string_start = decoder->pc();
// Consume bytes before validation to guarantee that the string is not oob.
if (length > 0) {
- decoder.consume_bytes(length, name);
- if (decoder.ok() && validate_utf8 &&
+ decoder->consume_bytes(length, name);
+ if (decoder->ok() && validate_utf8 &&
!unibrow::Utf8::ValidateEncoding(string_start, length)) {
- decoder.errorf(string_start, "%s: no valid UTF-8 string", name);
+ decoder->errorf(string_start, "%s: no valid UTF-8 string", name);
}
}
- return {offset, decoder.failed() ? 0 : length};
+ return {offset, decoder->failed() ? 0 : length};
}
// An iterator over the sections in a wasm binary module.
// Automatically skips all unknown sections.
class WasmSectionIterator {
public:
- explicit WasmSectionIterator(Decoder& decoder)
+ explicit WasmSectionIterator(Decoder* decoder)
: decoder_(decoder),
section_code_(kUnknownSectionCode),
- section_start_(decoder.pc()),
- section_end_(decoder.pc()) {
+ section_start_(decoder->pc()),
+ section_end_(decoder->pc()) {
next();
}
- inline bool more() const { return decoder_.ok() && decoder_.more(); }
+ inline bool more() const { return decoder_->ok() && decoder_->more(); }
inline SectionCode section_code() const { return section_code_; }
@@ -184,23 +184,23 @@ class WasmSectionIterator {
// Advances to the next section, checking that decoding the current section
// stopped at {section_end_}.
void advance(bool move_to_section_end = false) {
- if (move_to_section_end && decoder_.pc() < section_end_) {
- decoder_.consume_bytes(
- static_cast<uint32_t>(section_end_ - decoder_.pc()));
- }
- if (decoder_.pc() != section_end_) {
- const char* msg = decoder_.pc() < section_end_ ? "shorter" : "longer";
- decoder_.errorf(decoder_.pc(),
- "section was %s than expected size "
- "(%u bytes expected, %zu decoded)",
- msg, section_length(),
- static_cast<size_t>(decoder_.pc() - section_start_));
+ if (move_to_section_end && decoder_->pc() < section_end_) {
+ decoder_->consume_bytes(
+ static_cast<uint32_t>(section_end_ - decoder_->pc()));
+ }
+ if (decoder_->pc() != section_end_) {
+ const char* msg = decoder_->pc() < section_end_ ? "shorter" : "longer";
+ decoder_->errorf(decoder_->pc(),
+ "section was %s than expected size "
+ "(%u bytes expected, %zu decoded)",
+ msg, section_length(),
+ static_cast<size_t>(decoder_->pc() - section_start_));
}
next();
}
private:
- Decoder& decoder_;
+ Decoder* decoder_;
SectionCode section_code_;
const byte* section_start_;
const byte* payload_start_;
@@ -209,17 +209,17 @@ class WasmSectionIterator {
// Reads the section code/name at the current position and sets up
// the embedder fields.
void next() {
- if (!decoder_.more()) {
+ if (!decoder_->more()) {
section_code_ = kUnknownSectionCode;
return;
}
- section_start_ = decoder_.pc();
- uint8_t section_code = decoder_.consume_u8("section code");
+ section_start_ = decoder_->pc();
+ uint8_t section_code = decoder_->consume_u8("section code");
// Read and check the section size.
- uint32_t section_length = decoder_.consume_u32v("section length");
+ uint32_t section_length = decoder_->consume_u32v("section length");
- payload_start_ = decoder_.pc();
- if (decoder_.checkAvailable(section_length)) {
+ payload_start_ = decoder_->pc();
+ if (decoder_->checkAvailable(section_length)) {
// Get the limit of the section within the module.
section_end_ = payload_start_ + section_length;
} else {
@@ -234,19 +234,19 @@ class WasmSectionIterator {
ModuleDecoder::IdentifyUnknownSection(decoder_, section_end_);
// As a side effect, the above function will forward the decoder to after
// the identifier string.
- payload_start_ = decoder_.pc();
+ payload_start_ = decoder_->pc();
} else if (!IsValidSectionCode(section_code)) {
- decoder_.errorf(decoder_.pc(), "unknown section code #0x%02x",
- section_code);
+ decoder_->errorf(decoder_->pc(), "unknown section code #0x%02x",
+ section_code);
section_code = kUnknownSectionCode;
}
- section_code_ = decoder_.failed() ? kUnknownSectionCode
- : static_cast<SectionCode>(section_code);
+ section_code_ = decoder_->failed() ? kUnknownSectionCode
+ : static_cast<SectionCode>(section_code);
- if (section_code_ == kUnknownSectionCode && section_end_ > decoder_.pc()) {
+ if (section_code_ == kUnknownSectionCode && section_end_ > decoder_->pc()) {
// skip to the end of the unknown section.
- uint32_t remaining = static_cast<uint32_t>(section_end_ - decoder_.pc());
- decoder_.consume_bytes(remaining, "section payload");
+ uint32_t remaining = static_cast<uint32_t>(section_end_ - decoder_->pc());
+ decoder_->consume_bytes(remaining, "section payload");
}
}
};
@@ -259,13 +259,13 @@ class ModuleDecoderImpl : public Decoder {
explicit ModuleDecoderImpl(const WasmFeatures& enabled, ModuleOrigin origin)
: Decoder(nullptr, nullptr),
enabled_features_(enabled),
- origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {}
+ origin_(FLAG_assume_asmjs_origin ? kAsmJsSloppyOrigin : origin) {}
ModuleDecoderImpl(const WasmFeatures& enabled, const byte* module_start,
const byte* module_end, ModuleOrigin origin)
: Decoder(module_start, module_end),
enabled_features_(enabled),
- origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {
+ origin_(FLAG_assume_asmjs_origin ? kAsmJsSloppyOrigin : origin) {
if (end_ < start_) {
error(start_, "end is less than start");
end_ = start_;
@@ -520,8 +520,8 @@ class ModuleDecoderImpl : public Decoder {
});
WasmImport* import = &module_->import_table.back();
const byte* pos = pc_;
- import->module_name = consume_string(*this, true, "module name");
- import->field_name = consume_string(*this, true, "field name");
+ import->module_name = consume_string(this, true, "module name");
+ import->field_name = consume_string(this, true, "field name");
import->kind =
static_cast<ImportExportKindCode>(consume_u8("import kind"));
switch (import->kind) {
@@ -550,7 +550,7 @@ class ModuleDecoderImpl : public Decoder {
table->imported = true;
ValueType type = consume_reference_type();
if (!enabled_features_.anyref) {
- if (type != kWasmAnyFunc) {
+ if (type != kWasmFuncRef) {
error(pc_ - 1, "invalid table type");
break;
}
@@ -635,7 +635,7 @@ class ModuleDecoderImpl : public Decoder {
void DecodeTableSection() {
// TODO(ahaas): Set the correct limit to {kV8MaxWasmTables} once the
// implementation of AnyRef landed.
- uint32_t max_count = enabled_features_.anyref ? 10 : kV8MaxWasmTables;
+ uint32_t max_count = enabled_features_.anyref ? 100000 : kV8MaxWasmTables;
uint32_t table_count = consume_count("table count", max_count);
for (uint32_t i = 0; ok() && i < table_count; i++) {
@@ -694,7 +694,7 @@ class ModuleDecoderImpl : public Decoder {
});
WasmExport* exp = &module_->export_table.back();
- exp->name = consume_string(*this, true, "field name");
+ exp->name = consume_string(this, true, "field name");
const byte* pos = pc();
exp->kind = static_cast<ImportExportKindCode>(consume_u8("export kind"));
@@ -746,7 +746,7 @@ class ModuleDecoderImpl : public Decoder {
}
}
// Check for duplicate exports (except for asm.js).
- if (ok() && origin_ != kAsmJsOrigin && module_->export_table.size() > 1) {
+ if (ok() && origin_ == kWasmOrigin && module_->export_table.size() > 1) {
std::vector<WasmExport> sorted_exports(module_->export_table);
auto cmp_less = [this](const WasmExport& a, const WasmExport& b) {
@@ -808,16 +808,16 @@ class ModuleDecoderImpl : public Decoder {
errorf(pos, "out of bounds table index %u", table_index);
break;
}
- if (!ValueTypes::IsSubType(module_->tables[table_index].type,
- kWasmAnyFunc)) {
+ if (!ValueTypes::IsSubType(kWasmFuncRef,
+ module_->tables[table_index].type)) {
errorf(pos,
- "Invalid element segment. Table %u is not of type AnyFunc",
+ "Invalid element segment. Table %u is not of type FuncRef",
table_index);
break;
}
} else {
ValueType type = consume_reference_type();
- if (!ValueTypes::IsSubType(type, kWasmAnyFunc)) {
+ if (!ValueTypes::IsSubType(kWasmFuncRef, type)) {
error(pc_ - 1, "invalid element segment type");
break;
}
@@ -957,7 +957,7 @@ class ModuleDecoderImpl : public Decoder {
// Decode module name, ignore the rest.
// Function and local names will be decoded when needed.
if (name_type == NameSectionKindCode::kModule) {
- WireBytesRef name = consume_string(inner, false, "module name");
+ WireBytesRef name = consume_string(&inner, false, "module name");
if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
} else {
inner.consume_bytes(name_payload_len, "name subsection payload");
@@ -970,7 +970,7 @@ class ModuleDecoderImpl : public Decoder {
void DecodeSourceMappingURLSection() {
Decoder inner(start_, pc_, end_, buffer_offset_);
- WireBytesRef url = wasm::consume_string(inner, true, "module name");
+ WireBytesRef url = wasm::consume_string(&inner, true, "module name");
if (inner.ok() &&
!has_seen_unordered_section(kSourceMappingURLSectionCode)) {
const byte* url_start =
@@ -1128,7 +1128,7 @@ class ModuleDecoderImpl : public Decoder {
offset += 8;
Decoder decoder(start_ + offset, end_, offset);
- WasmSectionIterator section_iter(decoder);
+ WasmSectionIterator section_iter(&decoder);
while (ok() && section_iter.more()) {
// Shift the offset by the section header length
@@ -1269,7 +1269,7 @@ class ModuleDecoderImpl : public Decoder {
ValueTypes::TypeName(module->globals[other_index].type));
}
} else {
- if (!ValueTypes::IsSubType(global->type, TypeOf(module, global->init))) {
+ if (!ValueTypes::IsSubType(TypeOf(module, global->init), global->type)) {
errorf(pos, "type error in global initialization, expected %s, got %s",
ValueTypes::TypeName(global->type),
ValueTypes::TypeName(TypeOf(module, global->init)));
@@ -1373,32 +1373,33 @@ class ModuleDecoderImpl : public Decoder {
uint32_t consume_func_index(WasmModule* module, WasmFunction** func,
const char* name) {
- return consume_index(name, module->functions, func);
+ return consume_index(name, &module->functions, func);
}
uint32_t consume_global_index(WasmModule* module, WasmGlobal** global) {
- return consume_index("global index", module->globals, global);
+ return consume_index("global index", &module->globals, global);
}
uint32_t consume_table_index(WasmModule* module, WasmTable** table) {
- return consume_index("table index", module->tables, table);
+ return consume_index("table index", &module->tables, table);
}
uint32_t consume_exception_index(WasmModule* module, WasmException** except) {
- return consume_index("exception index", module->exceptions, except);
+ return consume_index("exception index", &module->exceptions, except);
}
template <typename T>
- uint32_t consume_index(const char* name, std::vector<T>& vector, T** ptr) {
+ uint32_t consume_index(const char* name, std::vector<T>* vector, T** ptr) {
const byte* pos = pc_;
uint32_t index = consume_u32v(name);
- if (index >= vector.size()) {
+ if (index >= vector->size()) {
errorf(pos, "%s %u out of bounds (%d entr%s)", name, index,
- static_cast<int>(vector.size()), vector.size() == 1 ? "y" : "ies");
+ static_cast<int>(vector->size()),
+ vector->size() == 1 ? "y" : "ies");
*ptr = nullptr;
return 0;
}
- *ptr = &vector[index];
+ *ptr = &(*vector)[index];
return index;
}
@@ -1594,14 +1595,14 @@ class ModuleDecoderImpl : public Decoder {
case kLocalS128:
if (enabled_features_.simd) return kWasmS128;
break;
- case kLocalAnyFunc:
- if (enabled_features_.anyref) return kWasmAnyFunc;
+ case kLocalFuncRef:
+ if (enabled_features_.anyref) return kWasmFuncRef;
break;
case kLocalAnyRef:
if (enabled_features_.anyref) return kWasmAnyRef;
break;
- case kLocalExceptRef:
- if (enabled_features_.eh) return kWasmExceptRef;
+ case kLocalExnRef:
+ if (enabled_features_.eh) return kWasmExnRef;
break;
default:
break;
@@ -1617,8 +1618,8 @@ class ModuleDecoderImpl : public Decoder {
byte val = consume_u8("reference type");
ValueTypeCode t = static_cast<ValueTypeCode>(val);
switch (t) {
- case kLocalAnyFunc:
- return kWasmAnyFunc;
+ case kLocalFuncRef:
+ return kWasmFuncRef;
case kLocalAnyRef:
if (!enabled_features_.anyref) {
error(pc_ - 1,
@@ -1680,45 +1681,41 @@ class ModuleDecoderImpl : public Decoder {
void consume_segment_header(const char* name, bool* is_active,
uint32_t* index, WasmInitExpr* offset) {
const byte* pos = pc();
- // In the MVP, this is a table or memory index field that must be 0, but
- // we've repurposed it as a flags field in the bulk memory proposal.
- uint32_t flags;
- if (enabled_features_.bulk_memory) {
- flags = consume_u32v("flags");
- if (failed()) return;
- } else {
- // Without the bulk memory proposal, we should still read the table
- // index. This is the same as reading the `ActiveWithIndex` flag with
- // the bulk memory proposal.
- flags = SegmentFlags::kActiveWithIndex;
+ uint32_t flag = consume_u32v("flag");
+
+ // Some flag values are only valid for specific proposals.
+ if (flag == SegmentFlags::kPassive) {
+ if (!enabled_features_.bulk_memory) {
+ error(
+ "Passive element segments require --experimental-wasm-bulk-memory");
+ return;
+ }
+ } else if (flag == SegmentFlags::kActiveWithIndex) {
+ if (!(enabled_features_.bulk_memory || enabled_features_.anyref)) {
+ error(
+ "Element segments with table indices require "
+ "--experimental-wasm-bulk-memory or --experimental-wasm-anyref");
+ return;
+ }
+ } else if (flag != SegmentFlags::kActiveNoIndex) {
+ errorf(pos, "illegal flag value %u. Must be 0, 1, or 2", flag);
+ return;
}
- bool read_index;
- bool read_offset;
- if (flags == SegmentFlags::kActiveNoIndex) {
+ // We know now that the flag is valid. Time to read the rest.
+ if (flag == SegmentFlags::kActiveNoIndex) {
*is_active = true;
- read_index = false;
- read_offset = true;
- } else if (flags == SegmentFlags::kPassive) {
+ *index = 0;
+ *offset = consume_init_expr(module_.get(), kWasmI32);
+ return;
+ }
+ if (flag == SegmentFlags::kPassive) {
*is_active = false;
- read_index = false;
- read_offset = false;
- } else if (flags == SegmentFlags::kActiveWithIndex) {
- *is_active = true;
- read_index = true;
- read_offset = true;
- } else {
- errorf(pos, "illegal flag value %u. Must be 0, 1, or 2", flags);
return;
}
-
- if (read_index) {
+ if (flag == SegmentFlags::kActiveWithIndex) {
+ *is_active = true;
*index = consume_u32v(name);
- } else {
- *index = 0;
- }
-
- if (read_offset) {
*offset = consume_init_expr(module_.get(), kWasmI32);
}
}
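For reference, the three accepted flag values map onto segment shapes as follows. This is a summarizing sketch, not code from this patch; the helper name DescribeSegmentFlag is invented, and the constants are the SegmentFlags values used in consume_segment_header above.

const char* DescribeSegmentFlag(uint32_t flag) {
  switch (flag) {
    case SegmentFlags::kActiveNoIndex:    // 0: allowed everywhere
      return "active, table/memory index 0, init expression follows";
    case SegmentFlags::kPassive:          // 1: needs --experimental-wasm-bulk-memory
      return "passive, no index, no init expression";
    case SegmentFlags::kActiveWithIndex:  // 2: needs bulk-memory or anyref
      return "active, explicit index, then init expression";
    default:
      return "invalid, must be 0, 1, or 2";
  }
}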
@@ -1833,17 +1830,17 @@ ModuleResult ModuleDecoder::FinishDecoding(bool verify_functions) {
return impl_->FinishDecoding(verify_functions);
}
-SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder,
+SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder* decoder,
const byte* end) {
WireBytesRef string = consume_string(decoder, true, "section name");
- if (decoder.failed() || decoder.pc() > end) {
+ if (decoder->failed() || decoder->pc() > end) {
return kUnknownSectionCode;
}
const byte* section_name_start =
- decoder.start() + decoder.GetBufferRelativeOffset(string.offset());
+ decoder->start() + decoder->GetBufferRelativeOffset(string.offset());
TRACE(" +%d section name : \"%.*s\"\n",
- static_cast<int>(section_name_start - decoder.start()),
+ static_cast<int>(section_name_start - decoder->start()),
string.length() < 20 ? string.length() : 20, section_name_start);
if (string.length() == num_chars(kNameString) &&
@@ -1989,20 +1986,20 @@ std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,
namespace {
-bool FindNameSection(Decoder& decoder) {
+bool FindNameSection(Decoder* decoder) {
static constexpr int kModuleHeaderSize = 8;
- decoder.consume_bytes(kModuleHeaderSize, "module header");
+ decoder->consume_bytes(kModuleHeaderSize, "module header");
WasmSectionIterator section_iter(decoder);
- while (decoder.ok() && section_iter.more() &&
+ while (decoder->ok() && section_iter.more() &&
section_iter.section_code() != kNameSectionCode) {
section_iter.advance(true);
}
if (!section_iter.more()) return false;
// Reset the decoder to not read beyond the name section end.
- decoder.Reset(section_iter.payload(), decoder.pc_offset());
+ decoder->Reset(section_iter.payload(), decoder->pc_offset());
return true;
}
@@ -2014,7 +2011,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
DCHECK(names->empty());
Decoder decoder(module_start, module_end);
- if (!FindNameSection(decoder)) return;
+ if (!FindNameSection(&decoder)) return;
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
@@ -2031,7 +2028,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
for (; decoder.ok() && functions_count > 0; --functions_count) {
uint32_t function_index = decoder.consume_u32v("function index");
- WireBytesRef name = consume_string(decoder, false, "function name");
+ WireBytesRef name = consume_string(&decoder, false, "function name");
// Be lenient with errors in the name section: Ignore non-UTF8 names. You
// can even assign to the same function multiple times (last valid one
@@ -2049,7 +2046,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
DCHECK(result->names.empty());
Decoder decoder(module_start, module_end);
- if (!FindNameSection(decoder)) return;
+ if (!FindNameSection(&decoder)) return;
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
@@ -2074,7 +2071,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
uint32_t num_names = decoder.consume_u32v("namings count");
for (uint32_t k = 0; k < num_names; ++k) {
uint32_t local_index = decoder.consume_u32v("local index");
- WireBytesRef name = consume_string(decoder, true, "local name");
+ WireBytesRef name = consume_string(&decoder, true, "local name");
if (!decoder.ok()) break;
if (local_index > kMaxInt) continue;
func_names.max_local_index =
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 07d6e66019..8e121c9d30 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -139,12 +139,12 @@ class ModuleDecoder {
// Translates the unknown section that decoder is pointing to to an extended
// SectionCode if the unknown section is known to decoder.
- // The decoder is expected to point after the section lenght and just before
+ // The decoder is expected to point after the section length and just before
// the identifier string of the unknown section.
// If a SectionCode other than kUnknownSectionCode is returned, the decoder
// will point right after the identifier string. Otherwise, the position is
// undefined.
- static SectionCode IdentifyUnknownSection(Decoder& decoder, const byte* end);
+ static SectionCode IdentifyUnknownSection(Decoder* decoder, const byte* end);
private:
const WasmFeatures enabled_features_;
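A minimal usage sketch of the contract described above; the decoder construction and the names payload_start and payload_end are illustrative assumptions, not taken from a caller in this patch.

// Assumes the decoder is positioned just past the section length field,
// i.e. at the identifier string of the custom section.
Decoder decoder(payload_start, payload_end);
SectionCode code = ModuleDecoder::IdentifyUnknownSection(&decoder, payload_end);
if (code != kUnknownSectionCode) {
  // decoder.pc() now points right after the identifier string.
}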
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 8293674826..a4b0139ea4 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -25,6 +25,9 @@ namespace v8 {
namespace internal {
namespace wasm {
+using base::ReadLittleEndianValue;
+using base::WriteLittleEndianValue;
+
namespace {
byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
@@ -48,35 +51,8 @@ uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
}
}
-// Queue of import wrapper keys to compile for an instance.
-class ImportWrapperQueue {
- public:
- // Removes an arbitrary cache key from the queue and returns it.
- // If the queue is empty, returns nullopt.
- // Thread-safe.
- base::Optional<WasmImportWrapperCache::CacheKey> pop() {
- base::Optional<WasmImportWrapperCache::CacheKey> key = base::nullopt;
- base::LockGuard<base::Mutex> lock(&mutex_);
- auto it = queue_.begin();
- if (it != queue_.end()) {
- key = *it;
- queue_.erase(it);
- }
- return key;
- }
-
- // Add the given key to the queue.
- // Not thread-safe.
- void insert(const WasmImportWrapperCache::CacheKey& key) {
- queue_.insert(key);
- }
-
- private:
- base::Mutex mutex_;
- std::unordered_set<WasmImportWrapperCache::CacheKey,
- WasmImportWrapperCache::CacheKeyHash>
- queue_;
-};
+using ImportWrapperQueue = WrapperQueue<WasmImportWrapperCache::CacheKey,
+ WasmImportWrapperCache::CacheKeyHash>;
class CompileImportWrapperTask final : public CancelableTask {
public:
@@ -200,9 +176,9 @@ class InstanceBuilder {
Handle<String> import_name,
Handle<Object> value);
- // Initialize imported tables of type anyfunc.
+ // Initialize imported tables of type funcref.
bool InitializeImportedIndirectFunctionTable(
- Handle<WasmInstanceObject> instance, int import_index,
+ Handle<WasmInstanceObject> instance, int table_index, int import_index,
Handle<WasmTableObject> table_object);
// Process a single imported table.
@@ -255,7 +231,7 @@ class InstanceBuilder {
// and globals.
void ProcessExports(Handle<WasmInstanceObject> instance);
- void InitializeTables(Handle<WasmInstanceObject> instance);
+ void InitializeIndirectFunctionTables(Handle<WasmInstanceObject> instance);
void LoadTableSegments(Handle<WasmInstanceObject> instance);
@@ -336,8 +312,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
memory->set_is_detachable(false);
DCHECK_IMPLIES(native_module->use_trap_handler(),
- module_->origin == kAsmJsOrigin ||
- memory->is_wasm_memory() ||
+ is_asmjs_module(module_) || memory->is_wasm_memory() ||
memory->backing_store() == nullptr);
} else if (initial_pages > 0 || native_module->use_trap_handler()) {
// We need to unconditionally create a guard region if using trap handlers,
@@ -421,15 +396,34 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Set up table storage space.
//--------------------------------------------------------------------------
int table_count = static_cast<int>(module_->tables.size());
- Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
- for (int i = module_->num_imported_tables; i < table_count; i++) {
- const WasmTable& table = module_->tables[i];
- Handle<WasmTableObject> table_obj = WasmTableObject::New(
- isolate_, table.type, table.initial_size, table.has_maximum_size,
- table.maximum_size, nullptr);
- tables->set(i, *table_obj);
+ {
+ Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
+ for (int i = module_->num_imported_tables; i < table_count; i++) {
+ const WasmTable& table = module_->tables[i];
+ Handle<WasmTableObject> table_obj = WasmTableObject::New(
+ isolate_, table.type, table.initial_size, table.has_maximum_size,
+ table.maximum_size, nullptr);
+ tables->set(i, *table_obj);
+ }
+ instance->set_tables(*tables);
+ }
+
+ {
+ Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
+ // Table 0 is handled specially. See {InitializeIndirectFunctionTable} for
+ // the initialization. All generated and runtime code will use this optimized
+ // shortcut in the instance. Hence it is safe to start with table 1 in the
+ // iteration below.
+ for (int i = 1; i < table_count; ++i) {
+ const WasmTable& table = module_->tables[i];
+ if (table.type == kWasmFuncRef) {
+ Handle<WasmIndirectFunctionTable> table_obj =
+ WasmIndirectFunctionTable::New(isolate_, table.initial_size);
+ tables->set(i, *table_obj);
+ }
+ }
+ instance->set_indirect_function_tables(*tables);
}
- instance->set_tables(*tables);
//--------------------------------------------------------------------------
// Process the imports for the module.
@@ -446,7 +440,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Initialize the indirect tables.
//--------------------------------------------------------------------------
if (table_count > 0) {
- InitializeTables(instance);
+ InitializeIndirectFunctionTables(instance);
}
//--------------------------------------------------------------------------
@@ -550,9 +544,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (module_->start_function_index >= 0) {
int start_index = module_->start_function_index;
auto& function = module_->functions[start_index];
- Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
- isolate_, function.sig, function.imported)
- .ToHandleChecked();
+ Handle<Code> wrapper_code =
+ JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ isolate_, function.sig, function.imported);
// TODO(clemensh): Don't generate an exported function for the start
// function. Use CWasmEntry instead.
start_function_ = WasmExportedFunction::New(
@@ -755,8 +749,8 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
break;
}
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
tagged_globals_->set(global.offset, *value->GetRef());
break;
}
@@ -800,7 +794,7 @@ void InstanceBuilder::SanitizeImports() {
int int_index = static_cast<int>(index);
MaybeHandle<Object> result =
- module_->origin == kAsmJsOrigin
+ is_asmjs_module(module_)
? LookupImportAsm(int_index, import_name)
: LookupImport(int_index, module_name, import_name);
if (thrower_->error()) {
@@ -842,8 +836,10 @@ bool InstanceBuilder::ProcessImportedFunction(
}
auto js_receiver = Handle<JSReceiver>::cast(value);
FunctionSig* expected_sig = module_->functions[func_index].sig;
- auto kind = compiler::GetWasmImportCallKind(js_receiver, expected_sig,
- enabled_.bigint);
+ auto resolved = compiler::ResolveWasmImportCall(js_receiver, expected_sig,
+ enabled_.bigint);
+ compiler::WasmImportCallKind kind = resolved.first;
+ js_receiver = resolved.second;
switch (kind) {
case compiler::WasmImportCallKind::kLinkError:
ReportLinkError("imported function does not match the expected type",
@@ -851,7 +847,7 @@ bool InstanceBuilder::ProcessImportedFunction(
return false;
case compiler::WasmImportCallKind::kWasmToWasm: {
// The imported function is a WASM function from another instance.
- auto imported_function = Handle<WasmExportedFunction>::cast(value);
+ auto imported_function = Handle<WasmExportedFunction>::cast(js_receiver);
Handle<WasmInstanceObject> imported_instance(
imported_function->instance(), isolate_);
// The import reference is the instance object itself.
@@ -866,7 +862,8 @@ bool InstanceBuilder::ProcessImportedFunction(
}
case compiler::WasmImportCallKind::kWasmToCapi: {
NativeModule* native_module = instance->module_object().native_module();
- Address host_address = WasmCapiFunction::cast(*value).GetHostCallTarget();
+ Address host_address =
+ WasmCapiFunction::cast(*js_receiver).GetHostCallTarget();
WasmCodeRefScope code_ref_scope;
WasmCode* wasm_code = compiler::CompileWasmCapiCallWrapper(
isolate_->wasm_engine(), native_module, expected_sig, host_address);
@@ -904,14 +901,12 @@ bool InstanceBuilder::ProcessImportedFunction(
}
bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
- Handle<WasmInstanceObject> instance, int import_index,
+ Handle<WasmInstanceObject> instance, int table_index, int import_index,
Handle<WasmTableObject> table_object) {
int imported_table_size = table_object->entries().length();
// Allocate a new dispatch table.
- if (!instance->has_indirect_function_table()) {
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, imported_table_size);
- }
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, table_index, imported_table_size);
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
for (int i = 0; i < imported_table_size; ++i) {
@@ -919,15 +914,22 @@ bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
bool is_null;
MaybeHandle<WasmInstanceObject> maybe_target_instance;
int function_index;
+ MaybeHandle<WasmJSFunction> maybe_js_function;
WasmTableObject::GetFunctionTableEntry(isolate_, table_object, i, &is_valid,
&is_null, &maybe_target_instance,
- &function_index);
+ &function_index, &maybe_js_function);
if (!is_valid) {
thrower_->LinkError("table import %d[%d] is not a wasm function",
import_index, i);
return false;
}
if (is_null) continue;
+ Handle<WasmJSFunction> js_function;
+ if (maybe_js_function.ToHandle(&js_function)) {
+ WasmInstanceObject::ImportWasmJSFunctionIntoTable(
+ isolate_, instance, table_index, i, js_function);
+ continue;
+ }
Handle<WasmInstanceObject> target_instance =
maybe_target_instance.ToHandleChecked();
@@ -939,7 +941,7 @@ bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
// Look up the signature's canonical id. If there is no canonical
// id, then the signature does not appear at all in this module,
// so putting {-1} in the table will cause checks to always fail.
- IndirectFunctionTableEntry(instance, i)
+ IndirectFunctionTableEntry(instance, table_index, i)
.Set(module_->signature_map.Find(*sig), target_instance,
function_index);
}
@@ -958,7 +960,6 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
}
const WasmTable& table = module_->tables[table_index];
- instance->tables().set(table_index, *value);
auto table_object = Handle<WasmTableObject>::cast(value);
int imported_table_size = table_object->entries().length();
@@ -995,13 +996,13 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
return false;
}
- // The indirect function table only exists for table 0.
- if (table.type == kWasmAnyFunc && table_index == 0 &&
- !InitializeImportedIndirectFunctionTable(instance, import_index,
- table_object)) {
+ if (table.type == kWasmFuncRef &&
+ !InitializeImportedIndirectFunctionTable(instance, table_index,
+ import_index, table_object)) {
return false;
}
+ instance->tables().set(table_index, *value);
return true;
}
@@ -1068,7 +1069,7 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
return false;
}
- bool is_sub_type = ValueTypes::IsSubType(global.type, global_object->type());
+ bool is_sub_type = ValueTypes::IsSubType(global_object->type(), global.type);
bool is_same_type = global_object->type() == global.type;
bool valid_type = global.mutability ? is_same_type : is_sub_type;
@@ -1129,7 +1130,7 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
module_name, import_name);
return false;
}
- if (module_->origin == kAsmJsOrigin) {
+ if (is_asmjs_module(module_)) {
// Accepting {JSFunction} on top of just primitive values here is a
// workaround to support legacy asm.js code with broken binding. Note
// that using {NaN} (or Smi::kZero) here is what using the observable
@@ -1162,11 +1163,11 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
if (ValueTypes::IsReferenceType(global.type)) {
// There shouldn't be any null-ref globals.
DCHECK_NE(ValueType::kWasmNullRef, global.type);
- if (global.type == ValueType::kWasmAnyFunc) {
+ if (global.type == ValueType::kWasmFuncRef) {
if (!value->IsNull(isolate_) &&
!WasmExportedFunction::IsWasmExportedFunction(*value)) {
ReportLinkError(
- "imported anyfunc global must be null or an exported function",
+ "imported funcref global must be null or an exported function",
import_index, module_name, import_name);
return false;
}
@@ -1217,8 +1218,9 @@ void InstanceBuilder::CompileImportWrappers(
auto js_receiver = Handle<JSReceiver>::cast(value);
uint32_t func_index = module_->import_table[index].index;
FunctionSig* sig = module_->functions[func_index].sig;
- auto kind =
- compiler::GetWasmImportCallKind(js_receiver, sig, enabled_.bigint);
+ auto resolved =
+ compiler::ResolveWasmImportCall(js_receiver, sig, enabled_.bigint);
+ compiler::WasmImportCallKind kind = resolved.first;
if (kind == compiler::WasmImportCallKind::kWasmToWasm ||
kind == compiler::WasmImportCallKind::kLinkError ||
kind == compiler::WasmImportCallKind::kWasmToCapi) {
@@ -1431,7 +1433,7 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t initial_pages,
bool InstanceBuilder::NeedsWrappers() const {
if (module_->num_exported_functions > 0) return true;
for (auto& table : module_->tables) {
- if (table.type == kWasmAnyFunc) return true;
+ if (table.type == kWasmFuncRef) return true;
}
return false;
}
@@ -1458,6 +1460,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
Handle<JSObject> exports_object;
+ MaybeHandle<String> single_function_name;
bool is_asm_js = false;
switch (module_->origin) {
case kWasmOrigin: {
@@ -1465,10 +1468,13 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
exports_object = isolate_->factory()->NewJSObjectWithNullProto();
break;
}
- case kAsmJsOrigin: {
+ case kAsmJsSloppyOrigin:
+ case kAsmJsStrictOrigin: {
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate_->native_context()->object_function(), isolate_);
exports_object = isolate_->factory()->NewJSObject(object_function);
+ single_function_name = isolate_->factory()->InternalizeUtf8String(
+ AsmJs::kSingleFunctionName);
is_asm_js = true;
break;
}
@@ -1477,9 +1483,6 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
instance->set_exports_object(*exports_object);
- Handle<String> single_function_name =
- isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
-
PropertyDescriptor desc;
desc.set_writable(is_asm_js);
desc.set_enumerable(true);
@@ -1490,14 +1493,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<String> name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
isolate_, module_object_, exp.name)
.ToHandleChecked();
- Handle<JSObject> export_to;
- if (is_asm_js && exp.kind == kExternalFunction &&
- String::Equals(isolate_, name, single_function_name)) {
- export_to = instance;
- } else {
- export_to = exports_object;
- }
-
+ Handle<JSObject> export_to = exports_object;
switch (exp.kind) {
case kExternalFunction: {
// Wrap and export the code as a JSFunction.
@@ -1505,8 +1501,13 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
MaybeHandle<WasmExportedFunction> wasm_exported_function =
WasmInstanceObject::GetOrCreateWasmExportedFunction(
isolate_, instance, exp.index);
-
desc.set_value(wasm_exported_function.ToHandleChecked());
+
+ if (is_asm_js &&
+ String::Equals(isolate_, name,
+ single_function_name.ToHandleChecked())) {
+ export_to = instance;
+ }
break;
}
case kExternalTable: {
@@ -1611,21 +1612,21 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
}
-void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
- size_t table_count = module_->tables.size();
- for (size_t index = 0; index < table_count; ++index) {
- const WasmTable& table = module_->tables[index];
+void InstanceBuilder::InitializeIndirectFunctionTables(
+ Handle<WasmInstanceObject> instance) {
+ for (int i = 0; i < static_cast<int>(module_->tables.size()); ++i) {
+ const WasmTable& table = module_->tables[i];
- if (!instance->has_indirect_function_table() &&
- table.type == kWasmAnyFunc) {
+ if (table.type == kWasmFuncRef) {
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, table.initial_size);
+ instance, i, table.initial_size);
}
}
}
bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<WasmTableObject> table_object,
+ uint32_t table_index,
const WasmElemSegment& elem_segment, uint32_t dst,
uint32_t src, size_t count) {
// TODO(wasm): Move this functionality into wasm-objects, since it is used
@@ -1642,8 +1643,8 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
int entry_index = static_cast<int>(dst + i);
if (func_index == WasmElemSegment::kNullIndex) {
- if (table_object->type() == kWasmAnyFunc) {
- IndirectFunctionTableEntry(instance, entry_index).clear();
+ if (table_object->type() == kWasmFuncRef) {
+ IndirectFunctionTableEntry(instance, table_index, entry_index).clear();
}
WasmTableObject::Set(isolate, table_object, entry_index,
isolate->factory()->null_value());
@@ -1652,13 +1653,10 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
const WasmFunction* function = &module->functions[func_index];
- // Update the local dispatch table first if necessary. We only have to
- // update the dispatch table if the first table of the instance is changed.
- // For all other tables, function calls do not use a dispatch table at
- // the moment.
- if (elem_segment.table_index == 0 && table_object->type() == kWasmAnyFunc) {
+ // Update the local dispatch table first if necessary.
+ if (table_object->type() == kWasmFuncRef) {
uint32_t sig_id = module->signature_ids[function->sig_index];
- IndirectFunctionTableEntry(instance, entry_index)
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, instance, func_index);
}
@@ -1699,6 +1697,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
// Passive segments are not copied during instantiation.
if (!elem_segment.active) continue;
+ uint32_t table_index = elem_segment.table_index;
uint32_t dst = EvalUint32InitExpr(instance, elem_segment.offset);
uint32_t src = 0;
size_t count = elem_segment.entries.size();
@@ -1708,7 +1707,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
handle(WasmTableObject::cast(
instance->tables().get(elem_segment.table_index)),
isolate_),
- elem_segment, dst, src, count);
+ table_index, elem_segment, dst, src, count);
if (enabled_.bulk_memory) {
if (!success) {
thrower_->LinkError("table initializer is out of bounds");
@@ -1724,7 +1723,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
int table_count = static_cast<int>(module_->tables.size());
for (int index = 0; index < table_count; ++index) {
- if (module_->tables[index].type == kWasmAnyFunc) {
+ if (module_->tables[index].type == kWasmFuncRef) {
auto table_object = handle(
WasmTableObject::cast(instance->tables().get(index)), isolate_);
@@ -1749,19 +1748,12 @@ void InstanceBuilder::InitializeExceptions(
bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t table_index, uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
- // This code path is only used for passive element segments with the
- // table.init instruction. This instruction was introduced in the
- // bulk-memory-operations proposal. At the moment, table.init can only operate
- // on table-0. If table.init should work for tables with higher indices, then
- // we have to adjust the code in {LoadElemSegmentImpl}. The code there uses
- // {IndirectFunctionTableEntry} at the moment, which only works for table-0.
- CHECK_EQ(table_index, 0);
auto& elem_segment = instance->module()->elem_segments[segment_index];
return LoadElemSegmentImpl(
isolate, instance,
handle(WasmTableObject::cast(instance->tables().get(table_index)),
isolate),
- elem_segment, dst, src, count);
+ table_index, elem_segment, dst, src, count);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 49fd2892eb..bca5c2b941 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -16,6 +16,16 @@ class Signature;
namespace wasm {
+// Type lattice: For any two types connected by a line, the type at the bottom
+// is a subtype of the other type.
+//
+//                       AnyRef
+//                       /    \
+//                 FuncRef    ExnRef
+//                       \    /
+//  I32  I64  F32  F64  NullRef
+//    \    \    \    \    /
+//     ------------  Bottom
enum ValueType : uint8_t {
kWasmStmt,
kWasmI32,
@@ -24,10 +34,10 @@ enum ValueType : uint8_t {
kWasmF64,
kWasmS128,
kWasmAnyRef,
- kWasmAnyFunc,
+ kWasmFuncRef,
kWasmNullRef,
- kWasmExceptRef,
- kWasmVar,
+ kWasmExnRef,
+ kWasmBottom,
};
using FunctionSig = Signature<ValueType>;
@@ -178,25 +188,31 @@ class StoreType {
// A collection of ValueType-related static methods.
class V8_EXPORT_PRIVATE ValueTypes {
public:
- static inline bool IsSubType(ValueType expected, ValueType actual) {
+ static inline bool IsSubType(ValueType actual, ValueType expected) {
return (expected == actual) ||
(expected == kWasmAnyRef && actual == kWasmNullRef) ||
- (expected == kWasmAnyRef && actual == kWasmAnyFunc) ||
- (expected == kWasmAnyRef && actual == kWasmExceptRef) ||
- (expected == kWasmAnyFunc && actual == kWasmNullRef) ||
- // TODO(mstarzinger): For now we treat "null_ref" as a sub-type of
- // "except_ref", which is correct but might change. See here:
+ (expected == kWasmAnyRef && actual == kWasmFuncRef) ||
+ (expected == kWasmAnyRef && actual == kWasmExnRef) ||
+ (expected == kWasmFuncRef && actual == kWasmNullRef) ||
+ // TODO(mstarzinger): For now we treat "nullref" as a sub-type of
+ // "exnref", which is correct but might change. See here:
// https://github.com/WebAssembly/exception-handling/issues/55
- (expected == kWasmExceptRef && actual == kWasmNullRef);
+ (expected == kWasmExnRef && actual == kWasmNullRef);
}
static inline bool IsReferenceType(ValueType type) {
- // This function assumes at the moment that it is never called with
- // {kWasmNullRef}. If this assumption is wrong, it should be added to the
- // result calculation below.
- DCHECK_NE(type, kWasmNullRef);
- return type == kWasmAnyRef || type == kWasmAnyFunc ||
- type == kWasmExceptRef;
+ return type == kWasmAnyRef || type == kWasmFuncRef || type == kWasmExnRef;
+ }
+
+ static inline ValueType CommonSubType(ValueType a, ValueType b) {
+ if (a == b) return a;
+ // The only sub type of any value type is {bot}.
+ if (!IsReferenceType(a) || !IsReferenceType(b)) return kWasmBottom;
+ if (IsSubType(a, b)) return a;
+ if (IsSubType(b, a)) return b;
+ // {a} and {b} are not each other's subtype. The biggest sub-type of all
+ // reference types is {kWasmNullRef}.
+ return kWasmNullRef;
}
static byte MemSize(MachineType type) {
@@ -214,8 +230,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmS128:
return 16;
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
return kSystemPointerSize;
default:
UNREACHABLE();
@@ -232,6 +248,10 @@ class V8_EXPORT_PRIVATE ValueTypes {
return 3;
case kWasmS128:
return 4;
+ case kWasmAnyRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
+ return kSystemPointerSizeLog2;
default:
UNREACHABLE();
}
@@ -253,10 +273,10 @@ class V8_EXPORT_PRIVATE ValueTypes {
return kLocalS128;
case kWasmAnyRef:
return kLocalAnyRef;
- case kWasmAnyFunc:
- return kLocalAnyFunc;
- case kWasmExceptRef:
- return kLocalExceptRef;
+ case kWasmFuncRef:
+ return kLocalFuncRef;
+ case kWasmExnRef:
+ return kLocalExnRef;
case kWasmStmt:
return kLocalVoid;
default:
@@ -275,8 +295,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmF64:
return MachineType::Float64();
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
return MachineType::TaggedPointer();
case kWasmS128:
return MachineType::Simd128();
@@ -298,9 +318,9 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmF64:
return MachineRepresentation::kFloat64;
case kWasmAnyRef:
- case kWasmAnyFunc:
+ case kWasmFuncRef:
case kWasmNullRef:
- case kWasmExceptRef:
+ case kWasmExnRef:
return MachineRepresentation::kTaggedPointer;
case kWasmS128:
return MachineRepresentation::kSimd128;
@@ -344,13 +364,13 @@ class V8_EXPORT_PRIVATE ValueTypes {
return 'd';
case kWasmAnyRef:
return 'r';
- case kWasmAnyFunc:
+ case kWasmFuncRef:
return 'a';
case kWasmS128:
return 's';
case kWasmStmt:
return 'v';
- case kWasmVar:
+ case kWasmBottom:
return '*';
default:
return '?';
@@ -369,18 +389,18 @@ class V8_EXPORT_PRIVATE ValueTypes {
return "f64";
case kWasmAnyRef:
return "anyref";
- case kWasmAnyFunc:
- return "anyfunc";
+ case kWasmFuncRef:
+ return "funcref";
case kWasmNullRef:
return "nullref";
- case kWasmExceptRef:
+ case kWasmExnRef:
return "exn";
case kWasmS128:
return "s128";
case kWasmStmt:
return "<stmt>";
- case kWasmVar:
- return "<var>";
+ case kWasmBottom:
+ return "<bot>";
default:
return "<unknown>";
}
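A few relations implied by the lattice and by the reordered IsSubType(actual, expected) arguments above; these are illustrative checks, not code from this patch.

DCHECK(ValueTypes::IsSubType(kWasmNullRef, kWasmFuncRef));  // actual, expected
DCHECK(ValueTypes::IsSubType(kWasmFuncRef, kWasmAnyRef));
DCHECK(!ValueTypes::IsSubType(kWasmAnyRef, kWasmFuncRef));
DCHECK_EQ(kWasmNullRef, ValueTypes::CommonSubType(kWasmFuncRef, kWasmExnRef));
DCHECK_EQ(kWasmBottom, ValueTypes::CommonSubType(kWasmI32, kWasmF64));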
diff --git a/deps/v8/src/wasm/wasm-arguments.h b/deps/v8/src/wasm/wasm-arguments.h
new file mode 100644
index 0000000000..822f46addd
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-arguments.h
@@ -0,0 +1,73 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_ARGUMENTS_H_
+#define V8_WASM_WASM_ARGUMENTS_H_
+
+#include <stdint.h>
+#include <vector>
+
+#include "src/base/memory.h"
+#include "src/codegen/signature.h"
+#include "src/common/globals.h"
+#include "src/wasm/value-type.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Helper class for {Push}ing Wasm value arguments onto the stack in the format
+// that the CWasmEntryStub expects, as well as for {Pop}ping return values.
+// {Reset} must be called if a packer instance used for pushing is then
+// reused for popping: it resets the internal pointer to the beginning of
+// the stack region.
+class CWasmArgumentsPacker {
+ public:
+ explicit CWasmArgumentsPacker(size_t buffer_size)
+ : heap_buffer_(buffer_size <= kMaxOnStackBuffer ? 0 : buffer_size),
+ buffer_((buffer_size <= kMaxOnStackBuffer) ? on_stack_buffer_
+ : heap_buffer_.data()) {}
+ i::Address argv() const { return reinterpret_cast<i::Address>(buffer_); }
+ void Reset() { offset_ = 0; }
+
+ template <typename T>
+ void Push(T val) {
+ Address address = reinterpret_cast<Address>(buffer_ + offset_);
+ offset_ += sizeof(val);
+ base::WriteUnalignedValue(address, val);
+ }
+
+ template <typename T>
+ T Pop() {
+ Address address = reinterpret_cast<Address>(buffer_ + offset_);
+ offset_ += sizeof(T);
+ return base::ReadUnalignedValue<T>(address);
+ }
+
+ static int TotalSize(FunctionSig* sig) {
+ int return_size = 0;
+ for (ValueType t : sig->returns()) {
+ return_size += ValueTypes::ElementSizeInBytes(t);
+ }
+ int param_size = 0;
+ for (ValueType t : sig->parameters()) {
+ param_size += ValueTypes::ElementSizeInBytes(t);
+ }
+ return std::max(return_size, param_size);
+ }
+
+ private:
+ static const size_t kMaxOnStackBuffer = 10 * i::kSystemPointerSize;
+
+ uint8_t on_stack_buffer_[kMaxOnStackBuffer];
+ std::vector<uint8_t> heap_buffer_;
+ uint8_t* buffer_;
+ size_t offset_ = 0;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_ARGUMENTS_H_
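A possible usage sketch for the packer above; the (i32, f64) -> i32 signature and the surrounding call into the compiled C wasm entry are assumptions for illustration, not taken from this patch.

// sig is assumed to be a FunctionSig* describing (i32, f64) -> i32.
CWasmArgumentsPacker packer(CWasmArgumentsPacker::TotalSize(sig));
packer.Push<int32_t>(42);    // parameters are pushed in signature order
packer.Push<double>(1.5);
// ... execute the CWasmEntry code with packer.argv() as the argument buffer ...
packer.Reset();              // rewind before reading return values
int32_t result = packer.Pop<int32_t>();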
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 2eddce3d95..3d0cde0cce 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -150,7 +150,8 @@ bool WasmCode::ShouldBeLogged(Isolate* isolate) {
// The return value is cached in {WasmEngine::IsolateData::log_codes}. Ensure
// to call {WasmEngine::EnableCodeLogging} if this return value would change
// for any isolate. Otherwise we might lose code events.
- return isolate->code_event_dispatcher()->IsListeningToCodeEvents() ||
+ return isolate->logger()->is_listening_to_code_events() ||
+ isolate->code_event_dispatcher()->IsListeningToCodeEvents() ||
isolate->is_profiling();
}
@@ -286,7 +287,8 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << "\n";
if (handler_table_size() > 0) {
- HandlerTable table(handler_table(), handler_table_size());
+ HandlerTable table(handler_table(), handler_table_size(),
+ HandlerTable::kReturnAddressBasedEncoding);
os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
<< "):\n";
table.HandlerTableReturnPrint(os);
@@ -403,12 +405,15 @@ void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
VirtualMemory code_space,
- bool can_request_more)
+ bool can_request_more,
+ std::shared_ptr<Counters> async_counters)
: code_manager_(code_manager),
free_code_space_(code_space.region()),
- can_request_more_memory_(can_request_more) {
+ can_request_more_memory_(can_request_more),
+ async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(can_request_more ? 4 : 1);
owned_code_space_.emplace_back(std::move(code_space));
+ async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
WasmCodeAllocator::~WasmCodeAllocator() {
@@ -487,6 +492,8 @@ Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
owned_code_space_.emplace_back(std::move(new_mem));
code_space = free_code_space_.Allocate(size);
DCHECK(!code_space.is_empty());
+ async_counters_->wasm_module_num_code_spaces()->AddSample(
+ static_cast<int>(owned_code_space_.size()));
}
const Address commit_page_size = page_allocator->CommitPageSize();
Address commit_start = RoundUp(code_space.begin(), commit_page_size);
@@ -613,7 +620,7 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this)
: code_allocator_(engine->code_manager(), std::move(code_space),
- can_request_more),
+ can_request_more, async_counters),
enabled_features_(enabled),
module_(std::move(module)),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
@@ -694,12 +701,26 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
+ if (!lazy_compile_table_) {
+ uint32_t num_slots = module_->num_declared_functions;
+ WasmCodeRefScope code_ref_scope;
+ lazy_compile_table_ = CreateEmptyJumpTable(
+ JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots));
+ JumpTableAssembler::GenerateLazyCompileTable(
+ lazy_compile_table_->instruction_start(), num_slots,
+ module_->num_imported_functions,
+ runtime_stub_entry(WasmCode::kWasmCompileLazy));
+ }
+
// Add jump table entry for jump to the lazy compile stub.
uint32_t slot_index = func_index - module_->num_imported_functions;
DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
- JumpTableAssembler::EmitLazyCompileJumpSlot(
- jump_table_->instruction_start(), slot_index, func_index,
- runtime_stub_entry(WasmCode::kWasmCompileLazy), WasmCode::kFlushICache);
+ Address lazy_compile_target =
+ lazy_compile_table_->instruction_start() +
+ JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
+ JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
+ slot_index, lazy_compile_target,
+ WasmCode::kFlushICache);
}
// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
@@ -713,23 +734,22 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
WasmCode::kRuntimeStubCount));
Address base = jump_table->instruction_start();
EmbeddedData embedded_data = EmbeddedData::FromBlob();
-#define RUNTIME_STUB(Name) {Builtins::k##Name, WasmCode::k##Name},
+#define RUNTIME_STUB(Name) Builtins::k##Name,
#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
- std::pair<Builtins::Name, WasmCode::RuntimeStubId> wasm_runtime_stubs[] = {
+ Builtins::Name wasm_runtime_stubs[WasmCode::kRuntimeStubCount] = {
WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
- for (auto pair : wasm_runtime_stubs) {
- CHECK(embedded_data.ContainsBuiltin(pair.first));
- Address builtin = embedded_data.InstructionStartOfBuiltin(pair.first);
- JumpTableAssembler::EmitRuntimeStubSlot(base, pair.second, builtin,
- WasmCode::kNoFlushICache);
- uint32_t slot_offset =
- JumpTableAssembler::StubSlotIndexToOffset(pair.second);
- runtime_stub_entries_[pair.second] = base + slot_offset;
+ Address builtin_address[WasmCode::kRuntimeStubCount];
+ for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
+ Builtins::Name builtin = wasm_runtime_stubs[i];
+ CHECK(embedded_data.ContainsBuiltin(builtin));
+ builtin_address[i] = embedded_data.InstructionStartOfBuiltin(builtin);
+ runtime_stub_entries_[i] =
+ base + JumpTableAssembler::StubSlotIndexToOffset(i);
}
- FlushInstructionCache(jump_table->instructions().begin(),
- jump_table->instructions().size());
+ JumpTableAssembler::GenerateRuntimeStubTable(base, builtin_address,
+ WasmCode::kRuntimeStubCount);
DCHECK_NULL(runtime_stub_table_);
runtime_stub_table_ = jump_table;
#else // V8_EMBEDDED_BUILTINS
@@ -822,7 +842,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
DCHECK_NE(kind, WasmCode::Kind::kInterpreterEntry);
std::unique_ptr<WasmCode> new_code{new WasmCode{
this, // native_module
- WasmCode::kAnonymousFuncIndex, // index
+ kAnonymousFuncIndex, // index
dst_code_bytes, // instructions
stack_slots, // stack_slots
0, // tagged_parameter_slots
@@ -920,8 +940,6 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
code->MaybePrint();
code->Validate();
- code->RegisterTrapHandlerData();
-
return code;
}
@@ -930,27 +948,28 @@ WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
return PublishCodeLocked(std::move(code));
}
-namespace {
-WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
- switch (tier) {
- case ExecutionTier::kInterpreter:
+WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
+ switch (result.kind) {
+ case WasmCompilationResult::kWasmToJsWrapper:
+ return WasmCode::Kind::kWasmToJsWrapper;
+ case WasmCompilationResult::kInterpreterEntry:
return WasmCode::Kind::kInterpreterEntry;
- case ExecutionTier::kLiftoff:
- case ExecutionTier::kTurbofan:
+ case WasmCompilationResult::kFunction:
return WasmCode::Kind::kFunction;
- case ExecutionTier::kNone:
+ default:
UNREACHABLE();
}
}
-} // namespace
WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
DCHECK(!allocation_mutex_.TryLock());
- if (!code->IsAnonymous()) {
+ if (!code->IsAnonymous() &&
+ code->index() >= module_->num_imported_functions) {
DCHECK_LT(code->index(), num_functions());
- DCHECK_LE(module_->num_imported_functions, code->index());
+
+ code->RegisterTrapHandlerData();
// Assume an order of execution tiers that represents the quality of their
// generated code.
@@ -1017,8 +1036,6 @@ WasmCode* NativeModule::AddDeserializedCode(
std::move(protected_instructions), std::move(reloc_info),
std::move(source_position_table), kind, tier}};
- code->RegisterTrapHandlerData();
-
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
@@ -1056,7 +1073,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{new WasmCode{
this, // native_module
- WasmCode::kAnonymousFuncIndex, // index
+ kAnonymousFuncIndex, // index
code_space, // instructions
0, // stack_slots
0, // tagged_parameter_slots
@@ -1112,11 +1129,16 @@ WasmCode* NativeModule::Lookup(Address pc) const {
return candidate;
}
+uint32_t NativeModule::GetJumpTableOffset(uint32_t func_index) const {
+ uint32_t slot_idx = func_index - module_->num_imported_functions;
+ DCHECK_GT(module_->num_declared_functions, slot_idx);
+ return JumpTableAssembler::JumpSlotIndexToOffset(slot_idx);
+}
+
Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
// Return the jump table slot for that function index.
DCHECK_NOT_NULL(jump_table_);
- uint32_t slot_idx = func_index - module_->num_imported_functions;
- uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot_idx);
+ uint32_t slot_offset = GetJumpTableOffset(func_index);
DCHECK_LT(slot_offset, jump_table_->instructions().size());
return jump_table_->instruction_start() + slot_offset;
}
@@ -1416,9 +1438,8 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
generated_code.emplace_back(AddCodeWithCodeSpace(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots, std::move(result.protected_instructions),
- std::move(result.source_positions),
- GetCodeKindForExecutionTier(result.result_tier), result.result_tier,
- this_code_space));
+ std::move(result.source_positions), GetCodeKind(result),
+ result.result_tier, this_code_space));
}
DCHECK_EQ(0, code_space.size());
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 49c287df2c..db7b4f061d 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -176,7 +176,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
- static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
private:
@@ -270,6 +269,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
+WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);
+
// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);
@@ -277,7 +278,8 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind);
class WasmCodeAllocator {
public:
WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
- bool can_request_more);
+ bool can_request_more,
+ std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
size_t committed_code_space() const {
@@ -315,7 +317,7 @@ class WasmCodeAllocator {
// Code space that was allocated for code (subset of {owned_code_space_}).
DisjointAllocationPool allocated_code_space_;
// Code space that was allocated before but is dead now. Full pages within
- // this region are discarded. It's still a subset of {owned_code_space_}).
+ // this region are discarded. It's still a subset of {owned_code_space_}.
DisjointAllocationPool freed_code_space_;
std::vector<VirtualMemory> owned_code_space_;
@@ -329,6 +331,8 @@ class WasmCodeAllocator {
bool is_executable_ = false;
const bool can_request_more_memory_;
+
+ std::shared_ptr<Counters> async_counters_;
};
class V8_EXPORT_PRIVATE NativeModule final {
@@ -399,10 +403,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
}
- ptrdiff_t jump_table_offset(uint32_t func_index) const {
- DCHECK_GE(func_index, num_imported_functions());
- return GetCallTargetForFunction(func_index) - jump_table_start();
- }
+ uint32_t GetJumpTableOffset(uint32_t func_index) const;
bool is_jump_table_slot(Address address) const {
return jump_table_->contains(address);
@@ -558,6 +559,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Jump table used to easily redirect wasm function calls.
WasmCode* jump_table_ = nullptr;
+ // Lazy compile stub table, containing entries to jump to the
+ // {WasmCompileLazy} builtin, passing the function index.
+ WasmCode* lazy_compile_table_ = nullptr;
+
// The compilation state keeps track of compilation tasks for this module.
// Note that its destructor blocks until all tasks are finished/aborted and
// hence needs to be destructed first when this native module dies.
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index fce60cb593..fbbe19396c 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -26,9 +26,9 @@ enum ValueTypeCode : uint8_t {
kLocalF32 = 0x7d,
kLocalF64 = 0x7c,
kLocalS128 = 0x7b,
- kLocalAnyFunc = 0x70,
+ kLocalFuncRef = 0x70,
kLocalAnyRef = 0x6f,
- kLocalExceptRef = 0x68,
+ kLocalExnRef = 0x68,
};
// Binary encoding of other types.
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
@@ -106,6 +106,8 @@ constexpr WasmCodePosition kNoCodePosition = -1;
constexpr uint32_t kExceptionAttribute = 0;
+constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 33d9a64bf4..2955bc602f 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -617,8 +617,8 @@ Handle<JSObject> WasmDebugInfo::GetLocalScopeObject(
}
// static
-Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
- Handle<WasmDebugInfo> debug_info, wasm::FunctionSig* sig) {
+Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info,
+ wasm::FunctionSig* sig) {
Isolate* isolate = debug_info->GetIsolate();
DCHECK_EQ(debug_info->has_c_wasm_entries(),
debug_info->has_c_wasm_entry_map());
@@ -642,24 +642,9 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
DCHECK(entries->get(index).IsUndefined(isolate));
Handle<Code> new_entry_code =
compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
- Handle<WasmExportedFunctionData> function_data =
- Handle<WasmExportedFunctionData>::cast(isolate->factory()->NewStruct(
- WASM_EXPORTED_FUNCTION_DATA_TYPE, AllocationType::kOld));
- function_data->set_wrapper_code(*new_entry_code);
- function_data->set_instance(debug_info->wasm_instance());
- function_data->set_jump_table_offset(-1);
- function_data->set_function_index(-1);
- Handle<String> name =
- isolate->factory()->InternalizeString(StaticCharVector("c-wasm-entry"));
- NewFunctionArgs args = NewFunctionArgs::ForWasm(
- name, function_data, isolate->sloppy_function_map());
- Handle<JSFunction> new_entry = isolate->factory()->NewFunction(args);
- new_entry->set_context(debug_info->wasm_instance().native_context());
- new_entry->shared().set_internal_formal_parameter_count(
- compiler::CWasmEntryParameters::kNumParameters);
- entries->set(index, *new_entry);
+ entries->set(index, *new_entry_code);
}
- return handle(JSFunction::cast(entries->get(index)), isolate);
+ return handle(Code::cast(entries->get(index)), isolate);
}
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 83053fd71f..7b91b16b80 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -8,6 +8,7 @@
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames.h"
+#include "src/execution/v8threads.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
@@ -88,24 +89,24 @@ class LogCodesTask : public Task {
WasmEngine* const engine_;
};
-class WasmGCForegroundTask : public Task {
- public:
- explicit WasmGCForegroundTask(Isolate* isolate) : isolate_(isolate) {
- DCHECK_NOT_NULL(isolate);
- }
-
- ~WasmGCForegroundTask() {
- // If the isolate is already shutting down, the platform can delete this
- // task without ever executing it. For that case, we need to deregister the
- // task from the engine to avoid UAF.
- if (isolate_) {
- WasmEngine* engine = isolate_->wasm_engine();
- engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
+void CheckNoArchivedThreads(Isolate* isolate) {
+ class ArchivedThreadsVisitor : public ThreadVisitor {
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
+ // Archived threads are rarely used, and not combined with Wasm at the
+ // moment. Implement this and test it properly once we have a use case for
+ // that.
+ FATAL("archived threads in combination with wasm not supported");
}
- }
+ } archived_threads_visitor;
+ isolate->thread_manager()->IterateArchivedThreads(&archived_threads_visitor);
+}
+
+class WasmGCForegroundTask : public CancelableTask {
+ public:
+ explicit WasmGCForegroundTask(Isolate* isolate)
+ : CancelableTask(isolate->cancelable_task_manager()), isolate_(isolate) {}
- void Run() final {
- if (isolate_ == nullptr) return; // cancelled.
+ void RunInternal() final {
WasmEngine* engine = isolate_->wasm_engine();
// If the foreground task is executing, there is no wasm code active. Just
// report an empty set of live wasm code.
@@ -114,13 +115,10 @@ class WasmGCForegroundTask : public Task {
DCHECK_NE(StackFrame::WASM_COMPILED, it.frame()->type());
}
#endif
+ CheckNoArchivedThreads(isolate_);
engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
- // Cancel to signal to the destructor that this task executed.
- Cancel();
}
- void Cancel() { isolate_ = nullptr; }
-
private:
Isolate* isolate_;
};
@@ -240,10 +238,13 @@ bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Vector<const byte> asm_js_offset_table_bytes,
- Handle<HeapNumber> uses_bitset) {
+ Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
+ ModuleOrigin origin = language_mode == LanguageMode::kSloppy
+ ? kAsmJsSloppyOrigin
+ : kAsmJsStrictOrigin;
ModuleResult result =
DecodeWasmModule(kAsmjsWasmFeatures, bytes.start(), bytes.end(), false,
- kAsmJsOrigin, isolate->counters(), allocator());
+ origin, isolate->counters(), allocator());
if (result.failed()) {
// This happens once in a while when we have missed some limit check
// in the asm parser. Output an error message to help diagnose, but crash.
@@ -465,6 +466,9 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
DCHECK_EQ(1, native_modules_.count(native_module));
native_modules_[native_module]->isolates.insert(isolate);
}
+
+ // Finish the Wasm script now and make it public to the debugger.
+ isolate->debug()->OnAfterCompile(script);
return module_object;
}
@@ -524,6 +528,24 @@ bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
return false;
}
+void WasmEngine::DeleteCompileJobsOnContext(Handle<Context> context) {
+ // Under the mutex get all jobs to delete. Then delete them without holding
+ // the mutex, such that deletion can reenter the WasmEngine.
+ std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
+ {
+ base::MutexGuard guard(&mutex_);
+ for (auto it = async_compile_jobs_.begin();
+ it != async_compile_jobs_.end();) {
+ if (!it->first->context().is_identical_to(context)) {
+ ++it;
+ continue;
+ }
+ jobs_to_delete.push_back(std::move(it->second));
+ it = async_compile_jobs_.erase(it);
+ }
+ }
+}
+
void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
// Under the mutex get all jobs to delete. Then delete them without holding
// the mutex, such that deletion can reenter the WasmEngine.
@@ -775,6 +797,8 @@ void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
live_wasm_code.insert(WasmCompiledFrame::cast(frame)->wasm_code());
}
+ CheckNoArchivedThreads(isolate);
+
ReportLiveCodeForGC(isolate,
OwnedVector<WasmCode*>::Of(live_wasm_code).as_vector());
}
@@ -876,11 +900,7 @@ void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
bool WasmEngine::RemoveIsolateFromCurrentGC(Isolate* isolate) {
DCHECK(!mutex_.TryLock());
DCHECK_NOT_NULL(current_gc_info_);
- auto it = current_gc_info_->outstanding_isolates.find(isolate);
- if (it == current_gc_info_->outstanding_isolates.end()) return false;
- if (auto* fg_task = it->second) fg_task->Cancel();
- current_gc_info_->outstanding_isolates.erase(it);
- return true;
+ return current_gc_info_->outstanding_isolates.erase(isolate) != 0;
}
void WasmEngine::PotentiallyFinishCurrentGC() {
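The new DeleteCompileJobsOnContext above mirrors DeleteCompileJobsOnIsolate: matching jobs are collected while the mutex is held and destroyed only after it is released, so a job destructor may re-enter the engine. A minimal sketch of that pattern with standard C++ types (Engine, Job and DeleteMatchingJobs are placeholders, not V8 names):

#include <memory>
#include <mutex>
#include <vector>

struct Job {};  // hypothetical async compile job

class Engine {
 public:
  // Deletes every job for which |matches| returns true. The mutex is dropped
  // before the jobs are destroyed, so a job destructor may call back into
  // this Engine without self-deadlocking.
  template <typename Pred>
  void DeleteMatchingJobs(Pred matches) {
    std::vector<std::unique_ptr<Job>> to_delete;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      for (auto it = jobs_.begin(); it != jobs_.end();) {
        if (matches(**it)) {
          to_delete.push_back(std::move(*it));
          it = jobs_.erase(it);
        } else {
          ++it;
        }
      }
    }
    // |to_delete| is destroyed here, outside the lock.
  }

 private:
  std::mutex mutex_;
  std::vector<std::unique_ptr<Job>> jobs_;
};

int main() {
  Engine engine;
  engine.DeleteMatchingJobs([](const Job&) { return true; });
  return 0;
}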
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 2ae3e81368..69e6cdae6e 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -62,7 +62,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
MaybeHandle<AsmWasmData> SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Vector<const byte> asm_js_offset_table_bytes,
- Handle<HeapNumber> uses_bitset);
+ Handle<HeapNumber> uses_bitset, LanguageMode language_mode);
Handle<WasmModuleObject> FinalizeTranslatedAsmJs(
Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
Handle<Script> script);
@@ -140,6 +140,11 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Isolate is currently running.
bool HasRunningCompileJob(Isolate* isolate);
+ // Deletes all AsyncCompileJobs that belong to the given context. All
+ // compilation is aborted, no more callbacks will be triggered. This is used
+ // when a context is disposed, e.g. because of browser navigation.
+ void DeleteCompileJobsOnContext(Handle<Context> context);
+
// Deletes all AsyncCompileJobs that belong to the given Isolate. All
// compilation is aborted, no more callbacks will be triggered. This is used
// for tearing down an isolate, or to clean it up to be reused.
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 997cf83bb7..08e6139abe 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -29,7 +29,7 @@
#include "src/trap-handler/trap-handler.h"
#endif
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/utils/utils.h"
#include "src/wasm/wasm-external-refs.h"
@@ -37,6 +37,9 @@ namespace v8 {
namespace internal {
namespace wasm {
+using base::ReadUnalignedValue;
+using base::WriteUnalignedValue;
+
void f32_trunc_wrapper(Address data) {
WriteUnalignedValue<float>(data, truncf(ReadUnalignedValue<float>(data)));
}
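The include switch above only changes where the unaligned-access helpers live; the wrappers keep the convention visible in f32_trunc_wrapper: read the operand unaligned from a caller-provided buffer address, compute, and write the result back in place. A self-contained sketch of that calling convention (UnalignedRead, UnalignedWrite and the wrapper name are simplified stand-ins, not the V8 helpers):

#include <cmath>
#include <cstring>

// Simplified stand-ins for base::ReadUnalignedValue / WriteUnalignedValue.
template <typename T>
T UnalignedRead(const void* p) {
  T v;
  std::memcpy(&v, p, sizeof(T));
  return v;
}
template <typename T>
void UnalignedWrite(void* p, T v) {
  std::memcpy(p, &v, sizeof(T));
}

// In-place wrapper: read a float from |data|, truncate it, write it back.
void f32_trunc_wrapper_sketch(void* data) {
  UnalignedWrite<float>(data, std::trunc(UnalignedRead<float>(data)));
}

int main() {
  // The caller packs the operand into a small buffer and passes its address.
  unsigned char buffer[sizeof(float)];
  UnalignedWrite<float>(buffer, 3.7f);
  f32_trunc_wrapper_sketch(buffer);
  return UnalignedRead<float>(buffer) == 3.0f ? 0 : 1;
}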
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
index b586d07ff4..9630fa76dd 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
@@ -18,6 +18,11 @@ WasmCode*& WasmImportWrapperCache::ModificationScope::operator[](
return cache_->entry_map_[key];
}
+WasmCode*& WasmImportWrapperCache::operator[](
+ const WasmImportWrapperCache::CacheKey& key) {
+ return entry_map_[key];
+}
+
WasmCode* WasmImportWrapperCache::Get(compiler::WasmImportCallKind kind,
FunctionSig* sig) const {
auto it = entry_map_.find({kind, sig});
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
index 62f27cd9a4..e9e60faad4 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.h
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -45,6 +45,10 @@ class WasmImportWrapperCache {
base::MutexGuard guard_;
};
+ // Not thread-safe, use ModificationScope to get exclusive write access to
+ // the cache.
+ V8_EXPORT_PRIVATE WasmCode*& operator[](const CacheKey& key);
+
// Assumes the key exists in the map.
V8_EXPORT_PRIVATE WasmCode* Get(compiler::WasmImportCallKind kind,
FunctionSig* sig) const;
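The new unlocked operator[] complements the existing one on ModificationScope: reads can stay lock-free, while writers are expected to hold exclusive access, typically via a scope that owns the mutex. A condensed sketch of that layering (WrapperCache, Key and CodeObj are hypothetical stand-ins, and the real ModificationScope may differ):

#include <map>
#include <mutex>

struct Key {
  int kind;
  int sig_id;
  bool operator<(const Key& other) const {
    return kind != other.kind ? kind < other.kind : sig_id < other.sig_id;
  }
};
struct CodeObj {};  // hypothetical compiled import wrapper

class WrapperCache {
 public:
  // RAII write access: the cache mutex is held for the lifetime of the scope.
  class ModificationScope {
   public:
    explicit ModificationScope(WrapperCache* cache)
        : cache_(cache), guard_(cache->mutex_) {}
    CodeObj*& operator[](const Key& key) { return cache_->map_[key]; }

   private:
    WrapperCache* cache_;
    std::lock_guard<std::mutex> guard_;
  };

  // Unlocked variant; only safe while the caller already has exclusive
  // access, which is what the "not thread-safe" comment above warns about.
  CodeObj*& operator[](const Key& key) { return map_[key]; }

 private:
  std::mutex mutex_;
  std::map<Key, CodeObj*> map_;
};

int main() {
  WrapperCache cache;
  CodeObj code;
  {
    // Typical write path: take a scope, then insert or update entries.
    WrapperCache::ModificationScope scope(&cache);
    scope[Key{0, 1}] = &code;
  }
  return cache[Key{0, 1}] == &code ? 0 : 1;  // single-threaded read here
}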
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index f06cead069..4449439896 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -12,7 +12,6 @@
#include "src/compiler/wasm-compiler.h"
#include "src/numbers/conversions.h"
#include "src/objects/objects-inl.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/utils/boxed-float.h"
#include "src/utils/identity-map.h"
#include "src/utils/utils.h"
@@ -21,12 +20,12 @@
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-arguments.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"
@@ -34,6 +33,11 @@ namespace v8 {
namespace internal {
namespace wasm {
+using base::ReadLittleEndianValue;
+using base::ReadUnalignedValue;
+using base::WriteLittleEndianValue;
+using base::WriteUnalignedValue;
+
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
@@ -582,7 +586,7 @@ inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
}
inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
- return static_cast<float>(a);
+ return DoubleToFloat32(a);
}
inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
@@ -816,7 +820,7 @@ class SideTable : public ZoneObject {
bool is_loop = opcode == kExprLoop;
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
imm.sig = module->signatures[imm.sig_index];
}
TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
@@ -832,7 +836,7 @@ class SideTable : public ZoneObject {
case kExprIf: {
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
imm.sig = module->signatures[imm.sig_index];
}
TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
@@ -865,7 +869,7 @@ class SideTable : public ZoneObject {
case kExprTry: {
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
imm.sig = module->signatures[imm.sig_index];
}
TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
@@ -1279,8 +1283,8 @@ class ThreadImpl {
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index = 0; // The index into the buffer.
@@ -1460,8 +1464,8 @@ class ThreadImpl {
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
val = WasmValue(isolate_->factory()->null_value());
break;
}
@@ -1658,8 +1662,8 @@ class ThreadImpl {
}
template <typename ctype, typename mtype>
- bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
- MachineRepresentation rep) {
+ bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc,
+ int* const len, MachineRepresentation rep) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
@@ -1672,7 +1676,7 @@ class ThreadImpl {
converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
Push(result);
- len = 1 + imm.length;
+ *len = 1 + imm.length;
if (FLAG_trace_wasm_memory) {
MemoryTracingInfo info(imm.offset + index, false, rep);
@@ -1685,8 +1689,8 @@ class ThreadImpl {
}
template <typename ctype, typename mtype>
- bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
- MachineRepresentation rep) {
+ bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
+ int* const len, MachineRepresentation rep) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
sizeof(ctype));
ctype val = Pop().to<ctype>();
@@ -1698,7 +1702,7 @@ class ThreadImpl {
return false;
}
WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
- len = 1 + imm.length;
+ *len = 1 + imm.length;
if (FLAG_trace_wasm_memory) {
MemoryTracingInfo info(imm.offset + index, true, rep);
@@ -1730,24 +1734,24 @@ class ThreadImpl {
template <typename type, typename op_type>
bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
- Address& address, pc_t pc, int& len,
+ Address* address, pc_t pc, int* const len,
type* val = nullptr, type* val2 = nullptr) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
sizeof(type));
if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
if (val) *val = static_cast<type>(Pop().to<op_type>());
uint32_t index = Pop().to<uint32_t>();
- address = BoundsCheckMem<type>(imm.offset, index);
+ *address = BoundsCheckMem<type>(imm.offset, index);
if (!address) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
- len = 2 + imm.length;
+ *len = 2 + imm.length;
return true;
}
bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
- InterpreterCode* code, pc_t pc, int& len) {
+ InterpreterCode* code, pc_t pc, int* const len) {
switch (opcode) {
case kExprI32SConvertSatF32:
Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
@@ -1776,7 +1780,7 @@ class ThreadImpl {
case kExprMemoryInit: {
MemoryInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
- len += imm.length;
+ *len += imm.length;
if (!CheckDataSegmentIsPassiveAndNotDropped(imm.data_segment_index,
pc)) {
return false;
@@ -1784,6 +1788,9 @@ class ThreadImpl {
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
+ if (size == 0) {
+ return true;
+ }
Address dst_addr;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
auto src_max =
@@ -1799,7 +1806,7 @@ class ThreadImpl {
}
case kExprDataDrop: {
DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- len += imm.length;
+ *len += imm.length;
if (!CheckDataSegmentIsPassiveAndNotDropped(imm.index, pc)) {
return false;
}
@@ -1808,11 +1815,15 @@ class ThreadImpl {
}
case kExprMemoryCopy: {
MemoryCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+ *len += imm.length;
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
+ if (size == 0) {
+ return true;
+ }
Address dst_addr;
- bool copy_backward = src < dst && dst - src < size;
+ bool copy_backward = src < dst;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
// Trap without copying any bytes if we are copying backward and the
// copy is partially out-of-bounds. We only need to check that the dst
@@ -1825,25 +1836,27 @@ class ThreadImpl {
memory_copy_wrapper(dst_addr, src_addr, size);
}
if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
- len += imm.length;
return ok;
}
case kExprMemoryFill: {
MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
code->at(pc + 1));
+ *len += imm.length;
auto size = Pop().to<uint32_t>();
auto value = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
+ if (size == 0) {
+ return true;
+ }
Address dst_addr;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
memory_fill_wrapper(dst_addr, value, size);
if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
- len += imm.length;
return ok;
}
case kExprTableInit: {
TableInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- len += imm.length;
+ *len += imm.length;
if (!CheckElemSegmentIsPassiveAndNotDropped(imm.elem_segment_index,
pc)) {
return false;
@@ -1860,7 +1873,7 @@ class ThreadImpl {
}
case kExprElemDrop: {
ElemDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- len += imm.length;
+ *len += imm.length;
if (!CheckElemSegmentIsPassiveAndNotDropped(imm.index, pc)) {
return false;
}
@@ -1877,9 +1890,64 @@ class ThreadImpl {
isolate_, instance_object_, imm.table_dst.index,
imm.table_src.index, dst, src, size);
if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
- len += imm.length;
+ *len += imm.length;
return ok;
}
+ case kExprTableGrow: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc + 1));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ auto delta = Pop().to<uint32_t>();
+ auto value = Pop().to_anyref();
+ int32_t result = WasmTableObject::Grow(isolate_, table, delta, value);
+ Push(WasmValue(result));
+ *len += imm.length;
+ return true;
+ }
+ case kExprTableSize: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc + 1));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ Push(WasmValue(table_size));
+ *len += imm.length;
+ return true;
+ }
+ case kExprTableFill: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc + 1));
+ HandleScope handle_scope(isolate_);
+ auto count = Pop().to<uint32_t>();
+ auto value = Pop().to_anyref();
+ auto start = Pop().to<uint32_t>();
+
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ if (start > table_size) {
+ DoTrap(kTrapTableOutOfBounds, pc);
+ return false;
+ }
+
+ // Even when table.fill goes out-of-bounds, as many entries as possible
+ // are put into the table. Only afterwards do we trap.
+ uint32_t fill_count = std::min(count, table_size - start);
+ WasmTableObject::Fill(isolate_, table, start, value, fill_count);
+
+ if (fill_count < count) {
+ DoTrap(kTrapTableOutOfBounds, pc);
+ return false;
+ }
+ *len += imm.length;
+ return true;
+ }
default:
FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
OpcodeName(code->start[pc]));
@@ -1911,7 +1979,7 @@ class ThreadImpl {
}
bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
- InterpreterCode* code, pc_t pc, int& len) {
+ InterpreterCode* code, pc_t pc, int* const len) {
#if V8_TARGET_BIG_ENDIAN
constexpr bool kBigEndian = true;
#else
@@ -1919,27 +1987,27 @@ class ThreadImpl {
#endif
WasmValue result;
switch (opcode) {
-#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- op_type result; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
- &val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- if (kBigEndian) { \
- auto oplambda = [](type a, type b) { return a op b; }; \
- result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda); \
- } else { \
- result = static_cast<op_type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
- } \
- Push(WasmValue(result)); \
- break; \
+#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ op_type result; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
+ &val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ if (kBigEndian) { \
+ auto oplambda = [](type a, type b) { return a op b; }; \
+ result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda); \
+ } else { \
+ result = static_cast<op_type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
+ } \
+ Push(WasmValue(result)); \
+ break; \
}
ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add, +);
ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add, +);
@@ -2003,24 +2071,24 @@ class ThreadImpl {
ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
atomic_exchange, =);
#undef ATOMIC_BINOP_CASE
-#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
- case kExpr##name: { \
- type old_val; \
- type new_val; \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
- &old_val, &new_val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- old_val = AdjustByteOrder<type>(old_val); \
- new_val = AdjustByteOrder<type>(new_val); \
- std::atomic_compare_exchange_strong( \
- reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val); \
- Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val)))); \
- break; \
+#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
+ case kExpr##name: { \
+ type old_val; \
+ type new_val; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
+ &old_val, &new_val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ old_val = AdjustByteOrder<type>(old_val); \
+ new_val = AdjustByteOrder<type>(new_val); \
+ std::atomic_compare_exchange_strong( \
+ reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val); \
+ Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val)))); \
+ break; \
}
ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
uint32_t);
@@ -2037,19 +2105,20 @@ class ThreadImpl {
ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
uint64_t);
#undef ATOMIC_COMPARE_EXCHANGE_CASE
-#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
- Push(result); \
- break; \
+#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
+ case kExpr##name: { \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, \
+ len)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
+ Push(result); \
+ break; \
}
ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
@@ -2059,20 +2128,20 @@ class ThreadImpl {
ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
#undef ATOMIC_LOAD_CASE
-#define ATOMIC_STORE_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
- &val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), \
- AdjustByteOrder<type>(val)); \
- break; \
+#define ATOMIC_STORE_CASE(name, type, op_type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
+ &val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), \
+ AdjustByteOrder<type>(val)); \
+ break; \
}
ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
@@ -2082,6 +2151,10 @@ class ThreadImpl {
ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
#undef ATOMIC_STORE_CASE
+ case kExprAtomicFence:
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ *len += 2;
+ break;
default:
UNREACHABLE();
return false;
@@ -2118,7 +2191,7 @@ class ThreadImpl {
}
bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
- pc_t pc, int& len) {
+ pc_t pc, int* const len) {
switch (opcode) {
#define SPLAT_CASE(format, sType, valType, num) \
case kExpr##format##Splat: { \
@@ -2129,23 +2202,27 @@ class ThreadImpl {
Push(WasmValue(Simd128(s))); \
return true; \
}
- SPLAT_CASE(I32x4, int4, int32_t, 4)
+ SPLAT_CASE(F64x2, float2, double, 2)
SPLAT_CASE(F32x4, float4, float, 4)
+ SPLAT_CASE(I64x2, int2, int64_t, 2)
+ SPLAT_CASE(I32x4, int4, int32_t, 4)
SPLAT_CASE(I16x8, int8, int32_t, 8)
SPLAT_CASE(I8x16, int16, int32_t, 16)
#undef SPLAT_CASE
#define EXTRACT_LANE_CASE(format, name) \
case kExpr##format##ExtractLane: { \
SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- ++len; \
+ *len += 1; \
WasmValue val = Pop(); \
Simd128 s = val.to_s128(); \
auto ss = s.to_##name(); \
Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
return true; \
}
- EXTRACT_LANE_CASE(I32x4, i32x4)
+ EXTRACT_LANE_CASE(F64x2, f64x2)
EXTRACT_LANE_CASE(F32x4, f32x4)
+ EXTRACT_LANE_CASE(I64x2, i64x2)
+ EXTRACT_LANE_CASE(I32x4, i32x4)
EXTRACT_LANE_CASE(I16x8, i16x8)
EXTRACT_LANE_CASE(I8x16, i8x16)
#undef EXTRACT_LANE_CASE
@@ -2169,6 +2246,9 @@ class ThreadImpl {
BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
+ BINOP_CASE(I64x2Add, i64x2, int2, 2, base::AddWithWraparound(a, b))
+ BINOP_CASE(I64x2Sub, i64x2, int2, 2, base::SubWithWraparound(a, b))
+ BINOP_CASE(I64x2Mul, i64x2, int2, 2, base::MulWithWraparound(a, b))
BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
@@ -2222,10 +2302,13 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
+ UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
+ UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
+ UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
@@ -2246,12 +2329,32 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
+ CMPOP_CASE(F64x2Eq, f64x2, float2, int2, 2, a == b)
+ CMPOP_CASE(F64x2Ne, f64x2, float2, int2, 2, a != b)
+ CMPOP_CASE(F64x2Gt, f64x2, float2, int2, 2, a > b)
+ CMPOP_CASE(F64x2Ge, f64x2, float2, int2, 2, a >= b)
+ CMPOP_CASE(F64x2Lt, f64x2, float2, int2, 2, a < b)
+ CMPOP_CASE(F64x2Le, f64x2, float2, int2, 2, a <= b)
CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
+ CMPOP_CASE(I64x2Eq, i64x2, int2, int2, 2, a == b)
+ CMPOP_CASE(I64x2Ne, i64x2, int2, int2, 2, a != b)
+ CMPOP_CASE(I64x2GtS, i64x2, int2, int2, 2, a > b)
+ CMPOP_CASE(I64x2GeS, i64x2, int2, int2, 2, a >= b)
+ CMPOP_CASE(I64x2LtS, i64x2, int2, int2, 2, a < b)
+ CMPOP_CASE(I64x2LeS, i64x2, int2, int2, 2, a <= b)
+ CMPOP_CASE(I64x2GtU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) > static_cast<uint64_t>(b))
+ CMPOP_CASE(I64x2GeU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) >= static_cast<uint64_t>(b))
+ CMPOP_CASE(I64x2LtU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) < static_cast<uint64_t>(b))
+ CMPOP_CASE(I64x2LeU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) <= static_cast<uint64_t>(b))
CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
@@ -2298,7 +2401,7 @@ class ThreadImpl {
#define REPLACE_LANE_CASE(format, name, stype, ctype) \
case kExpr##format##ReplaceLane: { \
SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- ++len; \
+ *len += 1; \
WasmValue new_val = Pop(); \
WasmValue simd_val = Pop(); \
stype s = simd_val.to_s128().to_##name(); \
@@ -2306,7 +2409,9 @@ class ThreadImpl {
Push(WasmValue(Simd128(s))); \
return true; \
}
+ REPLACE_LANE_CASE(F64x2, f64x2, float2, double)
REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
+ REPLACE_LANE_CASE(I64x2, i64x2, int2, int64_t)
REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
@@ -2320,7 +2425,7 @@ class ThreadImpl {
#define SHIFT_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- ++len; \
+ *len += 1; \
WasmValue v = Pop(); \
stype s = v.to_s128().to_##name(); \
stype res; \
@@ -2331,6 +2436,11 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
+ SHIFT_CASE(I64x2Shl, i64x2, int2, 2,
+ static_cast<uint64_t>(a) << imm.shift)
+ SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> imm.shift)
+ SHIFT_CASE(I64x2ShrU, i64x2, int2, 2,
+ static_cast<uint64_t>(a) >> imm.shift)
SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
static_cast<uint32_t>(a) << imm.shift)
SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
@@ -2452,7 +2562,7 @@ class ThreadImpl {
case kExprS8x16Shuffle: {
Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(decoder,
code->at(pc));
- len += 16;
+ *len += 16;
int16 v2 = Pop().to_s128().to_i8x16();
int16 v1 = Pop().to_s128().to_i8x16();
int16 res;
@@ -2465,6 +2575,7 @@ class ThreadImpl {
Push(WasmValue(Simd128(res)));
return true;
}
+ case kExprS1x2AnyTrue:
case kExprS1x4AnyTrue:
case kExprS1x8AnyTrue:
case kExprS1x16AnyTrue: {
@@ -2483,6 +2594,7 @@ class ThreadImpl {
Push(WasmValue(res)); \
return true; \
}
+ REDUCTION_CASE(S1x2AllTrue, i64x2, int2, 2, &)
REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
@@ -2583,8 +2695,8 @@ class ThreadImpl {
break;
}
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
Handle<Object> anyref = value.to_anyref();
encoded_values->set(encoded_index++, *anyref);
break;
@@ -2683,8 +2795,8 @@ class ThreadImpl {
break;
}
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
Handle<Object> anyref(encoded_values->get(encoded_index++), isolate_);
value = WasmValue(anyref);
break;
@@ -3005,11 +3117,9 @@ class ThreadImpl {
CallIndirectImmediate<Decoder::kNoValidate> imm(
kAllWasmFeatures, &decoder, code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
- // Assume only one table for now.
- DCHECK_LE(module()->tables.size(), 1u);
CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
ExternalCallResult result =
- CallIndirectFunction(0, entry_index, imm.sig_index);
+ CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
switch (result.type) {
case ExternalCallResult::INTERNAL:
// The import is a function of this instance. Call it directly.
@@ -3077,14 +3187,12 @@ class ThreadImpl {
CallIndirectImmediate<Decoder::kNoValidate> imm(
kAllWasmFeatures, &decoder, code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
- // Assume only one table for now.
- DCHECK_LE(module()->tables.size(), 1u);
CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
// TODO(wasm): Calling functions needs some refactoring to avoid
// multi-exit code like this.
ExternalCallResult result =
- CallIndirectFunction(0, entry_index, imm.sig_index);
+ CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
switch (result.type) {
case ExternalCallResult::INTERNAL: {
InterpreterCode* target = result.interpreter_code;
@@ -3141,8 +3249,8 @@ class ThreadImpl {
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index = 0; // The index into the buffer.
@@ -3156,10 +3264,42 @@ class ThreadImpl {
len = 1 + imm.length;
break;
}
-
+ case kExprTableGet: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ uint32_t entry_index = Pop().to<uint32_t>();
+ if (entry_index >= table_size) {
+ return DoTrap(kTrapTableOutOfBounds, pc);
+ }
+ Handle<Object> value =
+ WasmTableObject::Get(isolate_, table, entry_index);
+ Push(WasmValue(value));
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprTableSet: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ Handle<Object> value = Pop().to_anyref();
+ uint32_t entry_index = Pop().to<uint32_t>();
+ if (entry_index >= table_size) {
+ return DoTrap(kTrapTableOutOfBounds, pc);
+ }
+ WasmTableObject::Set(isolate_, table, entry_index, value);
+ len = 1 + imm.length;
+ break;
+ }
#define LOAD_CASE(name, ctype, mtype, rep) \
case kExpr##name: { \
- if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len, \
+ if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, &len, \
MachineRepresentation::rep)) \
return; \
break; \
@@ -3183,7 +3323,7 @@ class ThreadImpl {
#define STORE_CASE(name, ctype, mtype, rep) \
case kExpr##name: { \
- if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len, \
+ if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, &len, \
MachineRepresentation::rep)) \
return; \
break; \
@@ -3300,16 +3440,16 @@ class ThreadImpl {
}
case kNumericPrefix: {
++len;
- if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
+ if (!ExecuteNumericOp(opcode, &decoder, code, pc, &len)) return;
break;
}
case kAtomicPrefix: {
- if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
+ if (!ExecuteAtomicOp(opcode, &decoder, code, pc, &len)) return;
break;
}
case kSimdPrefix: {
++len;
- if (!ExecuteSimdOp(opcode, &decoder, code, pc, len)) return;
+ if (!ExecuteSimdOp(opcode, &decoder, code, pc, &len)) return;
break;
}
@@ -3547,118 +3687,71 @@ class ThreadImpl {
}
Handle<WasmDebugInfo> debug_info(instance_object_->debug_info(), isolate);
- Handle<JSFunction> wasm_entry =
- WasmDebugInfo::GetCWasmEntry(debug_info, sig);
+ Handle<Code> wasm_entry = WasmDebugInfo::GetCWasmEntry(debug_info, sig);
TRACE(" => Calling external wasm function\n");
// Copy the arguments to one buffer.
- // TODO(clemensh): Introduce a helper for all argument buffer
- // con-/destruction.
- std::vector<uint8_t> arg_buffer(num_args * 8);
- size_t offset = 0;
+ CWasmArgumentsPacker packer(CWasmArgumentsPacker::TotalSize(sig));
sp_t base_index = StackHeight() - num_args;
for (int i = 0; i < num_args; ++i) {
- int param_size = ValueTypes::ElementSizeInBytes(sig->GetParam(i));
- if (arg_buffer.size() < offset + param_size) {
- arg_buffer.resize(std::max(2 * arg_buffer.size(), offset + param_size));
- }
- Address address = reinterpret_cast<Address>(arg_buffer.data()) + offset;
WasmValue arg = GetStackValue(base_index + i);
switch (sig->GetParam(i)) {
case kWasmI32:
- WriteUnalignedValue(address, arg.to<uint32_t>());
+ packer.Push(arg.to<uint32_t>());
break;
case kWasmI64:
- WriteUnalignedValue(address, arg.to<uint64_t>());
+ packer.Push(arg.to<uint64_t>());
break;
case kWasmF32:
- WriteUnalignedValue(address, arg.to<float>());
+ packer.Push(arg.to<float>());
break;
case kWasmF64:
- WriteUnalignedValue(address, arg.to<double>());
+ packer.Push(arg.to<double>());
break;
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
- DCHECK_EQ(kSystemPointerSize, param_size);
- WriteUnalignedValue<Object>(address, *arg.to_anyref());
+ case kWasmFuncRef:
+ case kWasmExnRef:
+ packer.Push(arg.to_anyref()->ptr());
break;
default:
UNIMPLEMENTED();
}
- offset += param_size;
- }
-
- // Ensure that there is enough space in the arg_buffer to hold the return
- // value(s).
- size_t return_size = 0;
- for (ValueType t : sig->returns()) {
- return_size += ValueTypes::ElementSizeInBytes(t);
- }
- if (arg_buffer.size() < return_size) {
- arg_buffer.resize(return_size);
}
- // Wrap the arg_buffer and the code target data pointers in handles. As
- // these are aligned pointers, to the GC it will look like Smis.
- Handle<Object> arg_buffer_obj(
- Object(reinterpret_cast<Address>(arg_buffer.data())), isolate);
- DCHECK(!arg_buffer_obj->IsHeapObject());
- Handle<Object> code_entry_obj(Object(code->instruction_start()), isolate);
- DCHECK(!code_entry_obj->IsHeapObject());
-
- static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
- "code below needs adaption");
- Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
- args[compiler::CWasmEntryParameters::kCodeEntry] = code_entry_obj;
- args[compiler::CWasmEntryParameters::kObjectRef] = object_ref;
- args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
-
- Handle<Object> receiver = isolate->factory()->undefined_value();
- trap_handler::SetThreadInWasm();
- MaybeHandle<Object> maybe_retval =
- Execution::Call(isolate, wasm_entry, receiver, arraysize(args), args);
+ Address call_target = code->instruction_start();
+ Execution::CallWasm(isolate, wasm_entry, call_target, object_ref,
+ packer.argv());
TRACE(" => External wasm function returned%s\n",
- maybe_retval.is_null() ? " with exception" : "");
+ isolate->has_pending_exception() ? " with exception" : "");
// Pop arguments off the stack.
Drop(num_args);
- if (maybe_retval.is_null()) {
- // JSEntry may throw a stack overflow before we actually get to wasm code
- // or back to the interpreter, meaning the thread-in-wasm flag won't be
- // cleared.
- if (trap_handler::IsThreadInWasm()) {
- trap_handler::ClearThreadInWasm();
- }
+ if (isolate->has_pending_exception()) {
return TryHandleException(isolate);
}
- trap_handler::ClearThreadInWasm();
-
// Push return values.
- if (sig->return_count() > 0) {
- // TODO(wasm): Handle multiple returns.
- DCHECK_EQ(1, sig->return_count());
- Address address = reinterpret_cast<Address>(arg_buffer.data());
- switch (sig->GetReturn()) {
+ packer.Reset();
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ switch (sig->GetReturn(i)) {
case kWasmI32:
- Push(WasmValue(ReadUnalignedValue<uint32_t>(address)));
+ Push(WasmValue(packer.Pop<uint32_t>()));
break;
case kWasmI64:
- Push(WasmValue(ReadUnalignedValue<uint64_t>(address)));
+ Push(WasmValue(packer.Pop<uint64_t>()));
break;
case kWasmF32:
- Push(WasmValue(ReadUnalignedValue<float>(address)));
+ Push(WasmValue(packer.Pop<float>()));
break;
case kWasmF64:
- Push(WasmValue(ReadUnalignedValue<double>(address)));
+ Push(WasmValue(packer.Pop<double>()));
break;
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
- Handle<Object> ref(ReadUnalignedValue<Object>(address), isolate);
+ case kWasmFuncRef:
+ case kWasmExnRef: {
+ Handle<Object> ref(Object(packer.Pop<Address>()), isolate);
Push(WasmValue(ref));
break;
}
@@ -3710,25 +3803,24 @@ class ThreadImpl {
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
uint32_t expected_sig_id = module()->signature_ids[sig_index];
DCHECK_EQ(expected_sig_id,
module()->signature_map.Find(*module()->signatures[sig_index]));
-
- // The function table is stored in the instance.
- // TODO(wasm): the wasm interpreter currently supports only one table.
- CHECK_EQ(0, table_index);
// Bounds check against table size.
- if (entry_index >= instance_object_->indirect_function_table_size()) {
+ if (entry_index >=
+ static_cast<uint32_t>(WasmInstanceObject::IndirectFunctionTableSize(
+ isolate_, instance_object_, table_index))) {
return {ExternalCallResult::INVALID_FUNC};
}
- IndirectFunctionTableEntry entry(instance_object_, entry_index);
+ IndirectFunctionTableEntry entry(instance_object_, table_index,
+ entry_index);
// Signature check.
if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
FunctionSig* signature = module()->signatures[sig_index];
Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
WasmCode* code = GetTargetCode(isolate_, entry.target());
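The interpreter change above drops the hand-rolled arg_buffer in favour of CWasmArgumentsPacker: parameters are appended with Push() before the call, and after Reset() the results are read back with Pop(). A stand-alone sketch of such a packer over a flat byte buffer (ArgPacker and its sizing are simplified assumptions, not the real class):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Minimal flat-buffer packer: values are appended front to back; Reset()
// rewinds the cursor so the same buffer can be read back in order.
class ArgPacker {
 public:
  explicit ArgPacker(size_t total_size) : buffer_(total_size), offset_(0) {}

  template <typename T>
  void Push(T value) {
    std::memcpy(&buffer_[offset_], &value, sizeof(T));
    offset_ += sizeof(T);
  }

  template <typename T>
  T Pop() {
    T value;
    std::memcpy(&value, &buffer_[offset_], sizeof(T));
    offset_ += sizeof(T);
    return value;
  }

  void Reset() { offset_ = 0; }
  uint8_t* argv() { return buffer_.data(); }

 private:
  std::vector<uint8_t> buffer_;
  size_t offset_;
};

int main() {
  ArgPacker packer(sizeof(uint32_t) + sizeof(double));
  packer.Push<uint32_t>(42);  // parameters, pushed in signature order
  packer.Push<double>(1.5);
  // In the interpreter, the callee would now run and overwrite the buffer
  // with its results via packer.argv(); here we simply rewind and read back
  // the first value we pushed.
  packer.Reset();
  return packer.Pop<uint32_t>() == 42 ? 0 : 1;
}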
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index fb633c6c26..1ee76fc11d 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -1094,7 +1094,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!value->ToString(context).ToLocal(&string)) return;
auto enabled_features = i::wasm::WasmFeaturesFromFlags();
if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
- type = i::wasm::kWasmAnyFunc;
+ type = i::wasm::kWasmFuncRef;
} else if (enabled_features.anyref &&
string->StringEquals(v8_str(isolate, "anyref"))) {
type = i::wasm::kWasmAnyRef;
@@ -1222,7 +1222,7 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
*type = i::wasm::kWasmAnyRef;
} else if (enabled_features.anyref &&
string->StringEquals(v8_str(isolate, "anyfunc"))) {
- *type = i::wasm::kWasmAnyFunc;
+ *type = i::wasm::kWasmFuncRef;
} else {
// Unrecognized type.
*type = i::wasm::kWasmStmt;
@@ -1322,7 +1322,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Number> number_value;
if (!value->ToNumber(context).ToLocal(&number_value)) return;
if (!number_value->NumberValue(context).To(&f64_value)) return;
- f32_value = static_cast<float>(f64_value);
+ f32_value = i::DoubleToFloat32(f64_value);
}
global_obj->SetF32(f32_value);
break;
@@ -1347,15 +1347,15 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetAnyRef(Utils::OpenHandle(*value));
break;
}
- case i::wasm::kWasmAnyFunc: {
+ case i::wasm::kWasmFuncRef: {
if (args.Length() < 2) {
// When no initial value is provided, we have to use the WebAssembly
// default value 'null', and not the JS default value 'undefined'.
- global_obj->SetAnyFunc(i_isolate, i_isolate->factory()->null_value());
+ global_obj->SetFuncRef(i_isolate, i_isolate->factory()->null_value());
break;
}
- if (!global_obj->SetAnyFunc(i_isolate, Utils::OpenHandle(*value))) {
+ if (!global_obj->SetFuncRef(i_isolate, Utils::OpenHandle(*value))) {
thrower.TypeError(
"The value of anyfunc globals must be null or an "
"exported function");
@@ -1437,7 +1437,7 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Decode the function type and construct a signature.
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
- i::wasm::FunctionSig::Builder builder(&zone, parameters_len, results_len);
+ i::wasm::FunctionSig::Builder builder(&zone, results_len, parameters_len);
for (uint32_t i = 0; i < parameters_len; ++i) {
i::wasm::ValueType type;
MaybeLocal<Value> maybe = parameters->Get(context, i);
@@ -1513,13 +1513,12 @@ void WebAssemblyFunctionType(const v8::FunctionCallbackInfo<v8::Value>& args) {
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Function.type()");
i::wasm::FunctionSig* sig;
+ i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
if (i::WasmExportedFunction::IsWasmExportedFunction(*arg0)) {
sig = i::Handle<i::WasmExportedFunction>::cast(arg0)->sig();
} else if (i::WasmJSFunction::IsWasmJSFunction(*arg0)) {
- // TODO(7742): Implement deserialization of signature.
- sig = nullptr;
- UNIMPLEMENTED();
+ sig = i::Handle<i::WasmJSFunction>::cast(arg0)->GetSignature(&zone);
} else {
thrower.TypeError("Argument 0 must be a WebAssembly.Function");
return;
@@ -1686,7 +1685,7 @@ void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<String> element;
auto enabled_features = i::wasm::WasmFeaturesFromFlags();
- if (table->type() == i::wasm::ValueType::kWasmAnyFunc) {
+ if (table->type() == i::wasm::ValueType::kWasmFuncRef) {
element = v8_str(isolate, "anyfunc");
} else if (enabled_features.anyref &&
table->type() == i::wasm::ValueType::kWasmAnyRef) {
@@ -1694,7 +1693,6 @@ void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
} else {
UNREACHABLE();
}
- // TODO(aseemgarg): update anyfunc to funcref
if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
v8_str(isolate, "element"), element)
.IsJust()) {
@@ -1865,8 +1863,8 @@ void WebAssemblyGlobalGetValueCommon(
return_value.Set(receiver->GetF64());
break;
case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmAnyFunc:
- case i::wasm::kWasmExceptRef:
+ case i::wasm::kWasmFuncRef:
+ case i::wasm::kWasmExnRef:
return_value.Set(Utils::ToLocal(receiver->GetRef()));
break;
default:
@@ -1925,7 +1923,7 @@ void WebAssemblyGlobalSetValue(
case i::wasm::kWasmF32: {
double f64_value = 0;
if (!args[0]->NumberValue(context).To(&f64_value)) return;
- receiver->SetF32(static_cast<float>(f64_value));
+ receiver->SetF32(i::DoubleToFloat32(f64_value));
break;
}
case i::wasm::kWasmF64: {
@@ -1935,12 +1933,12 @@ void WebAssemblyGlobalSetValue(
break;
}
case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmExceptRef: {
+ case i::wasm::kWasmExnRef: {
receiver->SetAnyRef(Utils::OpenHandle(*args[0]));
break;
}
- case i::wasm::kWasmAnyFunc: {
- if (!receiver->SetAnyFunc(i_isolate, Utils::OpenHandle(*args[0]))) {
+ case i::wasm::kWasmFuncRef: {
+ if (!receiver->SetFuncRef(i_isolate, Utils::OpenHandle(*args[0]))) {
thrower.TypeError(
"value of an anyfunc reference must be either null or an "
"exported function");
@@ -2245,7 +2243,6 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
if (enabled_features.type_reflection) {
Handle<JSFunction> function_constructor = InstallConstructorFunc(
isolate, webassembly, "Function", WebAssemblyFunction);
- context->set_wasm_function_constructor(*function_constructor);
SetDummyInstanceTemplate(isolate, function_constructor);
JSFunction::EnsureHasInitialMap(function_constructor);
Handle<JSObject> function_proto(
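Among the wasm-js.cc fixes above is a swapped pair of counts passed to FunctionSig::Builder, whose constructor expects the return count before the parameter count; the wrong order compiles cleanly but describes a different signature. A tiny sketch of why count-first builders are easy to misuse (SigBuilder is a hypothetical analogue, not the V8 class):

#include <cassert>
#include <cstddef>
#include <vector>

enum class ValType { kI32, kF64 };

// Hypothetical analogue of a signature builder that reserves slots for
// |return_count| returns followed by |param_count| parameters.
class SigBuilder {
 public:
  SigBuilder(size_t return_count, size_t param_count)
      : returns_(return_count, ValType::kI32),
        params_(param_count, ValType::kI32) {}
  void SetReturn(size_t i, ValType t) { returns_.at(i) = t; }
  void SetParam(size_t i, ValType t) { params_.at(i) = t; }
  size_t return_count() const { return returns_.size(); }
  size_t param_count() const { return params_.size(); }

 private:
  std::vector<ValType> returns_;
  std::vector<ValType> params_;
};

int main() {
  size_t parameters_len = 2, results_len = 1;
  // Correct order: return count first, then parameter count.
  SigBuilder good(results_len, parameters_len);
  assert(good.return_count() == 1 && good.param_count() == 2);
  // The swapped order compiles just as well but describes a different
  // signature, which is the class of bug the wasm-js.cc change fixes.
  SigBuilder bad(parameters_len, results_len);
  assert(bad.return_count() == 2 && bad.param_count() == 1);
  return 0;
}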
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index 8633a61504..f203649542 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -40,6 +40,9 @@ bool RunWithGCAndRetry(const std::function<bool()>& fn, Heap* heap,
*did_retry = true;
if (trial == kAllocationRetries) return false;
// Otherwise, collect garbage and retry.
+ // TODO(wasm): Since reservation limits are engine-wide, we should do an
+ // engine-wide GC here (i.e. trigger a GC in each isolate using the engine,
+ // and wait for them all to finish). See https://crbug.com/v8/9405.
heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
}
}
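The TODO added above lives in RunWithGCAndRetry, which retries an allocation a bounded number of times and signals memory pressure between attempts. A generic sketch of that retry loop with caller-supplied callbacks (the retry bound and GC hook are placeholders for the V8 internals):

#include <functional>

constexpr int kAllocationRetries = 2;  // illustrative bound

// Runs |try_allocate| until it succeeds or the retry budget is exhausted,
// invoking |collect_garbage| between attempts. |did_retry| reports whether
// any retry happened at all, matching the shape of the loop above.
bool RunWithGCAndRetrySketch(const std::function<bool()>& try_allocate,
                             const std::function<void()>& collect_garbage,
                             bool* did_retry) {
  *did_retry = false;
  for (int trial = 0;; ++trial) {
    if (try_allocate()) return true;
    *did_retry = true;
    if (trial == kAllocationRetries) return false;
    // Otherwise free up memory and try again.
    collect_garbage();
  }
}

int main() {
  bool did_retry = false;
  int attempts = 0;
  bool ok = RunWithGCAndRetrySketch(
      [&] { return ++attempts == 3; },  // succeeds on the third attempt
      [] { /* pretend to collect garbage */ }, &did_retry);
  return ok && did_retry ? 0 : 1;
}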
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index eb253219ad..7dd6b1c7b2 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -16,7 +16,7 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
namespace v8 {
namespace internal {
@@ -26,18 +26,18 @@ namespace {
// Emit a section code and the size as a padded varint that can be patched
// later.
-size_t EmitSection(SectionCode code, ZoneBuffer& buffer) {
+size_t EmitSection(SectionCode code, ZoneBuffer* buffer) {
// Emit the section code.
- buffer.write_u8(code);
+ buffer->write_u8(code);
// Emit a placeholder for the length.
- return buffer.reserve_u32v();
+ return buffer->reserve_u32v();
}
// Patch the size of a section after it's finished.
-void FixupSection(ZoneBuffer& buffer, size_t start) {
- buffer.patch_u32v(start, static_cast<uint32_t>(buffer.offset() - start -
- kPaddedVarInt32Size));
+void FixupSection(ZoneBuffer* buffer, size_t start) {
+ buffer->patch_u32v(start, static_cast<uint32_t>(buffer->offset() - start -
+ kPaddedVarInt32Size));
}
} // namespace
@@ -186,22 +186,22 @@ void WasmFunctionBuilder::DeleteCodeAfter(size_t position) {
body_.Truncate(position);
}
-void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
- buffer.write_u32v(signature_index_);
+void WasmFunctionBuilder::WriteSignature(ZoneBuffer* buffer) const {
+ buffer->write_u32v(signature_index_);
}
-void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
+void WasmFunctionBuilder::WriteBody(ZoneBuffer* buffer) const {
size_t locals_size = locals_.Size();
- buffer.write_size(locals_size + body_.size());
- buffer.EnsureSpace(locals_size);
- byte** ptr = buffer.pos_ptr();
+ buffer->write_size(locals_size + body_.size());
+ buffer->EnsureSpace(locals_size);
+ byte** ptr = buffer->pos_ptr();
locals_.Emit(*ptr);
(*ptr) += locals_size; // UGLY: manual bump of position pointer
if (body_.size() > 0) {
- size_t base = buffer.offset();
- buffer.write(body_.begin(), body_.size());
+ size_t base = buffer->offset();
+ buffer->write(body_.begin(), body_.size());
for (DirectCallIndex call : direct_calls_) {
- buffer.patch_u32v(
+ buffer->patch_u32v(
base + call.offset,
call.direct_index +
static_cast<uint32_t>(builder_->function_imports_.size()));
@@ -209,29 +209,29 @@ void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
}
}
-void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const {
+void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer* buffer) const {
if (asm_func_start_source_position_ == 0 && asm_offsets_.size() == 0) {
- buffer.write_size(0);
+ buffer->write_size(0);
return;
}
size_t locals_enc_size = LEBHelper::sizeof_u32v(locals_.Size());
size_t func_start_size =
LEBHelper::sizeof_u32v(asm_func_start_source_position_);
- buffer.write_size(asm_offsets_.size() + locals_enc_size + func_start_size);
+ buffer->write_size(asm_offsets_.size() + locals_enc_size + func_start_size);
// Offset of the recorded byte offsets.
DCHECK_GE(kMaxUInt32, locals_.Size());
- buffer.write_u32v(static_cast<uint32_t>(locals_.Size()));
+ buffer->write_u32v(static_cast<uint32_t>(locals_.Size()));
// Start position of the function.
- buffer.write_u32v(asm_func_start_source_position_);
- buffer.write(asm_offsets_.begin(), asm_offsets_.size());
+ buffer->write_u32v(asm_func_start_source_position_);
+ buffer->write(asm_offsets_.begin(), asm_offsets_.size());
}
WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
: zone_(zone),
signatures_(zone),
function_imports_(zone),
- function_exports_(zone),
global_imports_(zone),
+ exports_(zone),
functions_(zone),
data_segments_(zone),
indirect_functions_(zone),
@@ -274,7 +274,10 @@ uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
if (count > FLAG_wasm_max_table_size - index) {
return std::numeric_limits<uint32_t>::max();
}
- indirect_functions_.resize(indirect_functions_.size() + count);
+ DCHECK(max_table_size_ == 0 ||
+ indirect_functions_.size() + count <= max_table_size_);
+ indirect_functions_.resize(indirect_functions_.size() + count,
+ WasmElemSegment::kNullIndex);
return index;
}
@@ -283,15 +286,23 @@ void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
indirect_functions_[indirect] = direct;
}
+void WasmModuleBuilder::SetMaxTableSize(uint32_t max) {
+ DCHECK_GE(FLAG_wasm_max_table_size, max);
+ DCHECK_GE(max, indirect_functions_.size());
+ max_table_size_ = max;
+}
+
uint32_t WasmModuleBuilder::AddImport(Vector<const char> name,
FunctionSig* sig) {
+ DCHECK(adding_imports_allowed_);
function_imports_.push_back({name, AddSignature(sig)});
return static_cast<uint32_t>(function_imports_.size() - 1);
}
uint32_t WasmModuleBuilder::AddGlobalImport(Vector<const char> name,
- ValueType type) {
- global_imports_.push_back({name, ValueTypes::ValueTypeCodeFor(type)});
+ ValueType type, bool mutability) {
+ global_imports_.push_back(
+ {name, ValueTypes::ValueTypeCodeFor(type), mutability});
return static_cast<uint32_t>(global_imports_.size() - 1);
}
@@ -300,14 +311,33 @@ void WasmModuleBuilder::MarkStartFunction(WasmFunctionBuilder* function) {
}
void WasmModuleBuilder::AddExport(Vector<const char> name,
- WasmFunctionBuilder* function) {
- function_exports_.push_back({name, function->func_index()});
+ ImportExportKindCode kind, uint32_t index) {
+ DCHECK_LE(index, std::numeric_limits<int>::max());
+ exports_.push_back({name, kind, static_cast<int>(index)});
+}
+
+uint32_t WasmModuleBuilder::AddExportedGlobal(ValueType type, bool mutability,
+ const WasmInitExpr& init,
+ Vector<const char> name) {
+ uint32_t index = AddGlobal(type, mutability, init);
+ AddExport(name, kExternalGlobal, index);
+ return index;
+}
+
+void WasmModuleBuilder::ExportImportedFunction(Vector<const char> name,
+ int import_index) {
+#if DEBUG
+ // The size of function_imports_ must not change any more.
+ adding_imports_allowed_ = false;
+#endif
+ exports_.push_back(
+ {name, kExternalFunction,
+ import_index - static_cast<int>(function_imports_.size())});
}
-uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool exported,
- bool mutability,
+uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool mutability,
const WasmInitExpr& init) {
- globals_.push_back({type, exported, mutability, init});
+ globals_.push_back({type, mutability, init});
return static_cast<uint32_t>(globals_.size() - 1);
}
@@ -322,25 +352,25 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
-void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
+void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// == Emit magic =============================================================
- buffer.write_u32(kWasmMagic);
- buffer.write_u32(kWasmVersion);
+ buffer->write_u32(kWasmMagic);
+ buffer->write_u32(kWasmVersion);
// == Emit signatures ========================================================
if (signatures_.size() > 0) {
size_t start = EmitSection(kTypeSectionCode, buffer);
- buffer.write_size(signatures_.size());
+ buffer->write_size(signatures_.size());
for (FunctionSig* sig : signatures_) {
- buffer.write_u8(kWasmFunctionTypeCode);
- buffer.write_size(sig->parameter_count());
+ buffer->write_u8(kWasmFunctionTypeCode);
+ buffer->write_size(sig->parameter_count());
for (auto param : sig->parameters()) {
- buffer.write_u8(ValueTypes::ValueTypeCodeFor(param));
+ buffer->write_u8(ValueTypes::ValueTypeCodeFor(param));
}
- buffer.write_size(sig->return_count());
+ buffer->write_size(sig->return_count());
for (auto ret : sig->returns()) {
- buffer.write_u8(ValueTypes::ValueTypeCodeFor(ret));
+ buffer->write_u8(ValueTypes::ValueTypeCodeFor(ret));
}
}
FixupSection(buffer, start);
@@ -349,19 +379,19 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit imports ===========================================================
if (global_imports_.size() + function_imports_.size() > 0) {
size_t start = EmitSection(kImportSectionCode, buffer);
- buffer.write_size(global_imports_.size() + function_imports_.size());
+ buffer->write_size(global_imports_.size() + function_imports_.size());
for (auto import : global_imports_) {
- buffer.write_u32v(0); // module name (length)
- buffer.write_string(import.name); // field name
- buffer.write_u8(kExternalGlobal);
- buffer.write_u8(import.type_code);
- buffer.write_u8(0); // immutable
+ buffer->write_u32v(0); // module name (length)
+ buffer->write_string(import.name); // field name
+ buffer->write_u8(kExternalGlobal);
+ buffer->write_u8(import.type_code);
+ buffer->write_u8(import.mutability ? 1 : 0);
}
for (auto import : function_imports_) {
- buffer.write_u32v(0); // module name (length)
- buffer.write_string(import.name); // field name
- buffer.write_u8(kExternalFunction);
- buffer.write_u32v(import.sig_index);
+ buffer->write_u32v(0); // module name (length)
+ buffer->write_string(import.name); // field name
+ buffer->write_u8(kExternalFunction);
+ buffer->write_u32v(import.sig_index);
}
FixupSection(buffer, start);
}
@@ -370,7 +400,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
uint32_t num_function_names = 0;
if (functions_.size() > 0) {
size_t start = EmitSection(kFunctionSectionCode, buffer);
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
for (auto* function : functions_) {
function->WriteSignature(buffer);
if (!function->name_.empty()) ++num_function_names;
@@ -381,28 +411,31 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit function table ====================================================
if (indirect_functions_.size() > 0) {
size_t start = EmitSection(kTableSectionCode, buffer);
- buffer.write_u8(1); // table count
- buffer.write_u8(kLocalAnyFunc);
- buffer.write_u8(kHasMaximumFlag);
- buffer.write_size(indirect_functions_.size());
- buffer.write_size(indirect_functions_.size());
+ buffer->write_u8(1); // table count
+ buffer->write_u8(kLocalFuncRef);
+ buffer->write_u8(kHasMaximumFlag);
+ buffer->write_size(indirect_functions_.size());
+ size_t max =
+ max_table_size_ > 0 ? max_table_size_ : indirect_functions_.size();
+ DCHECK_GE(max, indirect_functions_.size());
+ buffer->write_size(max);
FixupSection(buffer, start);
}
// == emit memory declaration ================================================
{
size_t start = EmitSection(kMemorySectionCode, buffer);
- buffer.write_u8(1); // memory count
+ buffer->write_u8(1); // memory count
if (has_shared_memory_) {
- buffer.write_u8(has_max_memory_size_ ? MemoryFlags::kSharedAndMaximum
- : MemoryFlags::kSharedNoMaximum);
+ buffer->write_u8(has_max_memory_size_ ? MemoryFlags::kSharedAndMaximum
+ : MemoryFlags::kSharedNoMaximum);
} else {
- buffer.write_u8(has_max_memory_size_ ? MemoryFlags::kMaximum
- : MemoryFlags::kNoMaximum);
+ buffer->write_u8(has_max_memory_size_ ? MemoryFlags::kMaximum
+ : MemoryFlags::kNoMaximum);
}
- buffer.write_u32v(min_memory_size_);
+ buffer->write_u32v(min_memory_size_);
if (has_max_memory_size_) {
- buffer.write_u32v(max_memory_size_);
+ buffer->write_u32v(max_memory_size_);
}
FixupSection(buffer, start);
}
@@ -410,76 +443,90 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit globals ===========================================================
if (globals_.size() > 0) {
size_t start = EmitSection(kGlobalSectionCode, buffer);
- buffer.write_size(globals_.size());
+ buffer->write_size(globals_.size());
for (auto global : globals_) {
- buffer.write_u8(ValueTypes::ValueTypeCodeFor(global.type));
- buffer.write_u8(global.mutability ? 1 : 0);
+ buffer->write_u8(ValueTypes::ValueTypeCodeFor(global.type));
+ buffer->write_u8(global.mutability ? 1 : 0);
switch (global.init.kind) {
case WasmInitExpr::kI32Const:
DCHECK_EQ(kWasmI32, global.type);
- buffer.write_u8(kExprI32Const);
- buffer.write_i32v(global.init.val.i32_const);
+ buffer->write_u8(kExprI32Const);
+ buffer->write_i32v(global.init.val.i32_const);
break;
case WasmInitExpr::kI64Const:
DCHECK_EQ(kWasmI64, global.type);
- buffer.write_u8(kExprI64Const);
- buffer.write_i64v(global.init.val.i64_const);
+ buffer->write_u8(kExprI64Const);
+ buffer->write_i64v(global.init.val.i64_const);
break;
case WasmInitExpr::kF32Const:
DCHECK_EQ(kWasmF32, global.type);
- buffer.write_u8(kExprF32Const);
- buffer.write_f32(global.init.val.f32_const);
+ buffer->write_u8(kExprF32Const);
+ buffer->write_f32(global.init.val.f32_const);
break;
case WasmInitExpr::kF64Const:
DCHECK_EQ(kWasmF64, global.type);
- buffer.write_u8(kExprF64Const);
- buffer.write_f64(global.init.val.f64_const);
+ buffer->write_u8(kExprF64Const);
+ buffer->write_f64(global.init.val.f64_const);
break;
case WasmInitExpr::kGlobalIndex:
- buffer.write_u8(kExprGetGlobal);
- buffer.write_u32v(global.init.val.global_index);
+ buffer->write_u8(kExprGetGlobal);
+ buffer->write_u32v(global.init.val.global_index);
break;
default: {
// No initializer, emit a default value.
switch (global.type) {
case kWasmI32:
- buffer.write_u8(kExprI32Const);
+ buffer->write_u8(kExprI32Const);
// LEB encoding of 0.
- buffer.write_u8(0);
+ buffer->write_u8(0);
break;
case kWasmI64:
- buffer.write_u8(kExprI64Const);
+ buffer->write_u8(kExprI64Const);
// LEB encoding of 0.
- buffer.write_u8(0);
+ buffer->write_u8(0);
break;
case kWasmF32:
- buffer.write_u8(kExprF32Const);
- buffer.write_f32(0.f);
+ buffer->write_u8(kExprF32Const);
+ buffer->write_f32(0.f);
break;
case kWasmF64:
- buffer.write_u8(kExprF64Const);
- buffer.write_f64(0.);
+ buffer->write_u8(kExprF64Const);
+ buffer->write_f64(0.);
break;
default:
UNREACHABLE();
}
}
}
- buffer.write_u8(kExprEnd);
+ buffer->write_u8(kExprEnd);
}
FixupSection(buffer, start);
}
// == emit exports ===========================================================
- if (!function_exports_.empty()) {
+ if (exports_.size() > 0) {
size_t start = EmitSection(kExportSectionCode, buffer);
- buffer.write_size(function_exports_.size());
- for (auto function_export : function_exports_) {
- buffer.write_string(function_export.name);
- buffer.write_u8(kExternalFunction);
- buffer.write_size(function_export.function_index +
- function_imports_.size());
+ buffer->write_size(exports_.size());
+ for (auto ex : exports_) {
+ buffer->write_string(ex.name);
+ buffer->write_u8(ex.kind);
+ switch (ex.kind) {
+ case kExternalFunction:
+ buffer->write_size(ex.index + function_imports_.size());
+ break;
+ case kExternalGlobal:
+ buffer->write_size(ex.index + global_imports_.size());
+ break;
+ case kExternalMemory:
+ case kExternalTable:
+ // The WasmModuleBuilder doesn't support importing tables or memories
+ // yet, so there is no index offset to add.
+ buffer->write_size(ex.index);
+ break;
+ case kExternalException:
+ UNREACHABLE();
+ }
}
FixupSection(buffer, start);
}
@@ -487,22 +534,33 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit start function index ==============================================
if (start_function_index_ >= 0) {
size_t start = EmitSection(kStartSectionCode, buffer);
- buffer.write_size(start_function_index_ + function_imports_.size());
+ buffer->write_size(start_function_index_ + function_imports_.size());
FixupSection(buffer, start);
}
// == emit function table elements ===========================================
if (indirect_functions_.size() > 0) {
size_t start = EmitSection(kElementSectionCode, buffer);
- buffer.write_u8(1); // count of entries
- buffer.write_u8(0); // table index
- buffer.write_u8(kExprI32Const); // offset
- buffer.write_u32v(0);
- buffer.write_u8(kExprEnd);
- buffer.write_size(indirect_functions_.size()); // element count
-
- for (auto index : indirect_functions_) {
- buffer.write_size(index + function_imports_.size());
+ buffer->write_u8(1); // count of entries
+ buffer->write_u8(0); // table index
+ uint32_t first_element = 0;
+ while (first_element < indirect_functions_.size() &&
+ indirect_functions_[first_element] == WasmElemSegment::kNullIndex) {
+ first_element++;
+ }
+ uint32_t last_element =
+ static_cast<uint32_t>(indirect_functions_.size() - 1);
+ while (last_element >= first_element &&
+ indirect_functions_[last_element] == WasmElemSegment::kNullIndex) {
+ last_element--;
+ }
+ buffer->write_u8(kExprI32Const); // offset
+ buffer->write_u32v(first_element);
+ buffer->write_u8(kExprEnd);
+ uint32_t element_count = last_element - first_element + 1;
+ buffer->write_size(element_count);
+ for (uint32_t i = first_element; i <= last_element; i++) {
+ buffer->write_size(indirect_functions_[i] + function_imports_.size());
}
FixupSection(buffer, start);
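The element section above emits only the contiguous block of filled slots: leading and trailing entries equal to WasmElemSegment::kNullIndex are skipped, and first_element becomes the section's i32 offset. Below is a standalone sketch of that trimming (not part of the patch), using a plain std::vector and a stand-in null marker instead of the V8 types:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const uint32_t kNullIndex = UINT32_MAX;  // stand-in for WasmElemSegment::kNullIndex
  std::vector<uint32_t> table = {kNullIndex, kNullIndex, 3, 4, 5, kNullIndex};
  uint32_t first = 0;
  while (first < table.size() && table[first] == kNullIndex) first++;
  uint32_t last = static_cast<uint32_t>(table.size() - 1);
  while (last >= first && table[last] == kNullIndex) last--;
  // Emits offset = 2 and element count = 3 (the entries 3, 4, 5).
  std::printf("offset=%u count=%u\n", static_cast<unsigned>(first),
              static_cast<unsigned>(last - first + 1));
  return 0;
}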
@@ -518,18 +576,18 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
}
if (emit_compilation_hints) {
// Emit the section code.
- buffer.write_u8(kUnknownSectionCode);
+ buffer->write_u8(kUnknownSectionCode);
// Emit a placeholder for section length.
- size_t start = buffer.reserve_u32v();
+ size_t start = buffer->reserve_u32v();
// Emit custom section name.
- buffer.write_string(CStrVector("compilationHints"));
+ buffer->write_string(CStrVector("compilationHints"));
// Emit hint count.
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
// Emit hint bytes.
for (auto* fn : functions_) {
uint8_t hint_byte =
fn->hint_ != kNoCompilationHint ? fn->hint_ : kDefaultCompilationHint;
- buffer.write_u8(hint_byte);
+ buffer->write_u8(hint_byte);
}
FixupSection(buffer, start);
}
@@ -537,7 +595,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit code ==============================================================
if (functions_.size() > 0) {
size_t start = EmitSection(kCodeSectionCode, buffer);
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
for (auto* function : functions_) {
function->WriteBody(buffer);
}
@@ -547,15 +605,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit data segments =====================================================
if (data_segments_.size() > 0) {
size_t start = EmitSection(kDataSectionCode, buffer);
- buffer.write_size(data_segments_.size());
+ buffer->write_size(data_segments_.size());
for (auto segment : data_segments_) {
- buffer.write_u8(0); // linear memory segment
- buffer.write_u8(kExprI32Const); // initializer expression for dest
- buffer.write_u32v(segment.dest);
- buffer.write_u8(kExprEnd);
- buffer.write_u32v(static_cast<uint32_t>(segment.data.size()));
- buffer.write(&segment.data[0], segment.data.size());
+ buffer->write_u8(0); // linear memory segment
+ buffer->write_u8(kExprI32Const); // initializer expression for dest
+ buffer->write_u32v(segment.dest);
+ buffer->write_u8(kExprEnd);
+ buffer->write_u32v(static_cast<uint32_t>(segment.data.size()));
+ buffer->write(&segment.data[0], segment.data.size());
}
FixupSection(buffer, start);
}
@@ -563,33 +621,33 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit names =============================================================
if (num_function_names > 0 || !function_imports_.empty()) {
// Emit the section code.
- buffer.write_u8(kUnknownSectionCode);
+ buffer->write_u8(kUnknownSectionCode);
// Emit a placeholder for the length.
- size_t start = buffer.reserve_u32v();
+ size_t start = buffer->reserve_u32v();
// Emit the section string.
- buffer.write_string(CStrVector("name"));
+ buffer->write_string(CStrVector("name"));
// Emit a subsection for the function names.
- buffer.write_u8(NameSectionKindCode::kFunction);
+ buffer->write_u8(NameSectionKindCode::kFunction);
// Emit a placeholder for the subsection length.
- size_t functions_start = buffer.reserve_u32v();
+ size_t functions_start = buffer->reserve_u32v();
// Emit the function names.
// Imports are always named.
uint32_t num_imports = static_cast<uint32_t>(function_imports_.size());
- buffer.write_size(num_imports + num_function_names);
+ buffer->write_size(num_imports + num_function_names);
uint32_t function_index = 0;
for (; function_index < num_imports; ++function_index) {
const WasmFunctionImport* import = &function_imports_[function_index];
DCHECK(!import->name.empty());
- buffer.write_u32v(function_index);
- buffer.write_string(import->name);
+ buffer->write_u32v(function_index);
+ buffer->write_string(import->name);
}
if (num_function_names > 0) {
for (auto* function : functions_) {
DCHECK_EQ(function_index,
function->func_index() + function_imports_.size());
if (!function->name_.empty()) {
- buffer.write_u32v(function_index);
- buffer.write_string(function->name_);
+ buffer->write_u32v(function_index);
+ buffer->write_string(function->name_);
}
++function_index;
}
@@ -599,15 +657,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
}
}
-void WasmModuleBuilder::WriteAsmJsOffsetTable(ZoneBuffer& buffer) const {
+void WasmModuleBuilder::WriteAsmJsOffsetTable(ZoneBuffer* buffer) const {
// == Emit asm.js offset table ===============================================
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
// Emit the offset table per function.
for (auto* function : functions_) {
function->WriteAsmWasmOffsetTable(buffer);
}
// Append a 0 to indicate that this is an encoded table.
- buffer.write_u8(0);
+ buffer->write_u8(0);
}
} // namespace wasm
} // namespace internal
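With this change the ZoneBuffer output parameters are passed by pointer rather than by non-const reference throughout the builder. A minimal call-site sketch (not part of the patch), assuming an existing Zone* zone and WasmModuleBuilder* builder; the ZoneBuffer constructor used here is an assumption taken from the surrounding V8 code:

ZoneBuffer buffer(zone);
builder->WriteTo(&buffer);               // previously: builder->WriteTo(buffer)
builder->WriteAsmJsOffsetTable(&buffer);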
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 750dafa227..9e6a8933e2 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -8,7 +8,7 @@
#include "src/codegen/signature.h"
#include "src/zone/zone-containers.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/utils/vector.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
@@ -36,19 +36,19 @@ class ZoneBuffer : public ZoneObject {
void write_u16(uint16_t x) {
EnsureSpace(2);
- WriteLittleEndianValue<uint16_t>(reinterpret_cast<Address>(pos_), x);
+ base::WriteLittleEndianValue<uint16_t>(reinterpret_cast<Address>(pos_), x);
pos_ += 2;
}
void write_u32(uint32_t x) {
EnsureSpace(4);
- WriteLittleEndianValue<uint32_t>(reinterpret_cast<Address>(pos_), x);
+ base::WriteLittleEndianValue<uint32_t>(reinterpret_cast<Address>(pos_), x);
pos_ += 4;
}
void write_u64(uint64_t x) {
EnsureSpace(8);
- WriteLittleEndianValue<uint64_t>(reinterpret_cast<Address>(pos_), x);
+ base::WriteLittleEndianValue<uint64_t>(reinterpret_cast<Address>(pos_), x);
pos_ += 8;
}
@@ -187,9 +187,9 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
}
void DeleteCodeAfter(size_t position);
- void WriteSignature(ZoneBuffer& buffer) const;
- void WriteBody(ZoneBuffer& buffer) const;
- void WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const;
+ void WriteSignature(ZoneBuffer* buffer) const;
+ void WriteBody(ZoneBuffer* buffer) const;
+ void WriteAsmWasmOffsetTable(ZoneBuffer* buffer) const;
WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }
@@ -231,22 +231,34 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// Building methods.
uint32_t AddImport(Vector<const char> name, FunctionSig* sig);
WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
- uint32_t AddGlobal(ValueType type, bool exported, bool mutability = true,
+ uint32_t AddGlobal(ValueType type, bool mutability = true,
const WasmInitExpr& init = WasmInitExpr());
- uint32_t AddGlobalImport(Vector<const char> name, ValueType type);
+ uint32_t AddGlobalImport(Vector<const char> name, ValueType type,
+ bool mutability);
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
+  // In the current implementation, the indirect function table may contain
+  // uninitialized slots at the beginning and/or end, as long as the filled
+  // slots form a contiguous block in the middle.
uint32_t AllocateIndirectFunctions(uint32_t count);
void SetIndirectFunction(uint32_t indirect, uint32_t direct);
+ void SetMaxTableSize(uint32_t max);
void MarkStartFunction(WasmFunctionBuilder* builder);
- void AddExport(Vector<const char> name, WasmFunctionBuilder* builder);
+ void AddExport(Vector<const char> name, ImportExportKindCode kind,
+ uint32_t index);
+ void AddExport(Vector<const char> name, WasmFunctionBuilder* builder) {
+ AddExport(name, kExternalFunction, builder->func_index());
+ }
+ uint32_t AddExportedGlobal(ValueType type, bool mutability,
+ const WasmInitExpr& init, Vector<const char> name);
+ void ExportImportedFunction(Vector<const char> name, int import_index);
void SetMinMemorySize(uint32_t value);
void SetMaxMemorySize(uint32_t value);
void SetHasSharedMemory();
// Writing methods.
- void WriteTo(ZoneBuffer& buffer) const;
- void WriteAsmJsOffsetTable(ZoneBuffer& buffer) const;
+ void WriteTo(ZoneBuffer* buffer) const;
+ void WriteAsmJsOffsetTable(ZoneBuffer* buffer) const;
Zone* zone() { return zone_; }
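The export list is now kind-aware, so functions and globals share one exports_ vector and the indirect function table's maximum size can be capped explicitly. A minimal usage sketch of the methods declared above (not part of the patch), assuming an existing WasmModuleBuilder* builder and WasmFunctionBuilder* fn inside the v8::internal::wasm namespace:

builder->SetMaxTableSize(16);  // written as the table's maximum in WriteTo
builder->AddExportedGlobal(kWasmI32, /*mutability=*/true, WasmInitExpr(),
                           CStrVector("counter"));
builder->AddExport(CStrVector("run"), kExternalFunction, fn->func_index());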
@@ -258,19 +270,20 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
uint32_t sig_index;
};
- struct WasmFunctionExport {
+ struct WasmGlobalImport {
Vector<const char> name;
- uint32_t function_index;
+ ValueTypeCode type_code;
+ bool mutability;
};
- struct WasmGlobalImport {
+ struct WasmExport {
Vector<const char> name;
- ValueTypeCode type_code;
+ ImportExportKindCode kind;
+ int index; // Can be negative for re-exported imports.
};
struct WasmGlobal {
ValueType type;
- bool exported;
bool mutability;
WasmInitExpr init;
};
@@ -284,18 +297,23 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
Zone* zone_;
ZoneVector<FunctionSig*> signatures_;
ZoneVector<WasmFunctionImport> function_imports_;
- ZoneVector<WasmFunctionExport> function_exports_;
ZoneVector<WasmGlobalImport> global_imports_;
+ ZoneVector<WasmExport> exports_;
ZoneVector<WasmFunctionBuilder*> functions_;
ZoneVector<WasmDataSegment> data_segments_;
ZoneVector<uint32_t> indirect_functions_;
ZoneVector<WasmGlobal> globals_;
ZoneUnorderedMap<FunctionSig, uint32_t> signature_map_;
int start_function_index_;
+ uint32_t max_table_size_ = 0;
uint32_t min_memory_size_;
uint32_t max_memory_size_;
bool has_max_memory_size_;
bool has_shared_memory_;
+#if DEBUG
+  // Once ExportImportedFunction is called, no more imports can be added.
+ bool adding_imports_allowed_ = true;
+#endif
};
inline FunctionSig* WasmFunctionBuilder::signature() {
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index eb40c51dd3..7dea208d8e 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -164,7 +164,11 @@ struct WasmCompilationHint {
WasmCompilationHintTier top_tier;
};
-enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
+enum ModuleOrigin : uint8_t {
+ kWasmOrigin,
+ kAsmJsSloppyOrigin,
+ kAsmJsStrictOrigin
+};
#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix) \
((origin) == kWasmOrigin ? (counters)->prefix##_wasm_##suffix() \
@@ -221,6 +225,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
void AddFunctionNameForTesting(int function_index, WireBytesRef name);
};
+inline bool is_asmjs_module(const WasmModule* module) {
+ return module->origin != kWasmOrigin;
+}
+
size_t EstimateStoredSize(const WasmModule* module);
// Returns the number of possible export wrappers for a given module.
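kAsmJsOrigin is split into sloppy and strict variants, and is_asmjs_module treats anything that is not kWasmOrigin as asm.js. A standalone sketch with hypothetical minimal stand-ins for the V8 types (not the real definitions), just to show the predicate:

#include <cstdint>
enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsSloppyOrigin, kAsmJsStrictOrigin };
struct WasmModule { ModuleOrigin origin; };
// Mirrors is_asmjs_module above: both asm.js origins report true; the
// sloppy/strict distinction only matters later when picking the function map.
inline bool is_asmjs_module(const WasmModule* module) {
  return module->origin != kWasmOrigin;
}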
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index e1fc2d2410..7a80b7ea2b 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -7,7 +7,7 @@
#include "src/wasm/wasm-objects.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/contexts-inl.h"
#include "src/objects/foreign-inl.h"
@@ -51,10 +51,11 @@ CAST_ACCESSOR(WasmModuleObject)
CAST_ACCESSOR(WasmTableObject)
CAST_ACCESSOR(AsmWasmData)
-#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
- bool holder::has_##name() { \
- return !READ_FIELD(*this, offset).IsUndefined(); \
- } \
+#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
+ DEF_GETTER(holder, has_##name, bool) { \
+ Object value = TaggedField<Object, offset>::load(isolate, *this); \
+ return !value.IsUndefined(GetReadOnlyRoots(isolate)); \
+ } \
ACCESSORS(holder, name, type, offset)
#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
@@ -65,7 +66,7 @@ CAST_ACCESSOR(AsmWasmData)
/* kTaggedSize aligned so we have to use unaligned pointer friendly */ \
/* way of accessing them in order to avoid undefined behavior in C++ */ \
/* code. */ \
- return ReadUnalignedValue<type>(FIELD_ADDR(*this, offset)); \
+ return base::ReadUnalignedValue<type>(FIELD_ADDR(*this, offset)); \
} else { \
return *reinterpret_cast<type const*>(FIELD_ADDR(*this, offset)); \
} \
@@ -77,7 +78,7 @@ CAST_ACCESSOR(AsmWasmData)
/* kTaggedSize aligned so we have to use unaligned pointer friendly */ \
/* way of accessing them in order to avoid undefined behavior in C++ */ \
/* code. */ \
- WriteUnalignedValue<type>(FIELD_ADDR(*this, offset), value); \
+ base::WriteUnalignedValue<type>(FIELD_ADDR(*this, offset), value); \
} else { \
*reinterpret_cast<type*>(FIELD_ADDR(*this, offset)) = value; \
} \
@@ -110,7 +111,7 @@ void WasmModuleObject::reset_breakpoint_infos() {
GetReadOnlyRoots().undefined_value());
}
bool WasmModuleObject::is_asm_js() {
- bool asm_js = module()->origin == wasm::kAsmJsOrigin;
+ bool asm_js = is_asmjs_module(module());
DCHECK_EQ(asm_js, script().IsUserJavaScript());
DCHECK_EQ(asm_js, has_asm_js_offset_table());
return asm_js;
@@ -148,53 +149,54 @@ Address WasmGlobalObject::address() const {
}
int32_t WasmGlobalObject::GetI32() {
- return ReadLittleEndianValue<int32_t>(address());
+ return base::ReadLittleEndianValue<int32_t>(address());
}
int64_t WasmGlobalObject::GetI64() {
- return ReadLittleEndianValue<int64_t>(address());
+ return base::ReadLittleEndianValue<int64_t>(address());
}
float WasmGlobalObject::GetF32() {
- return ReadLittleEndianValue<float>(address());
+ return base::ReadLittleEndianValue<float>(address());
}
double WasmGlobalObject::GetF64() {
- return ReadLittleEndianValue<double>(address());
+ return base::ReadLittleEndianValue<double>(address());
}
Handle<Object> WasmGlobalObject::GetRef() {
- // We use this getter for anyref, anyfunc, and except_ref.
+ // We use this getter for anyref, funcref, and exnref.
DCHECK(wasm::ValueTypes::IsReferenceType(type()));
return handle(tagged_buffer().get(offset()), GetIsolate());
}
void WasmGlobalObject::SetI32(int32_t value) {
- WriteLittleEndianValue<int32_t>(address(), value);
+ base::WriteLittleEndianValue<int32_t>(address(), value);
}
void WasmGlobalObject::SetI64(int64_t value) {
- WriteLittleEndianValue<int64_t>(address(), value);
+ base::WriteLittleEndianValue<int64_t>(address(), value);
}
void WasmGlobalObject::SetF32(float value) {
- WriteLittleEndianValue<float>(address(), value);
+ base::WriteLittleEndianValue<float>(address(), value);
}
void WasmGlobalObject::SetF64(double value) {
- WriteLittleEndianValue<double>(address(), value);
+ base::WriteLittleEndianValue<double>(address(), value);
}
void WasmGlobalObject::SetAnyRef(Handle<Object> value) {
- // We use this getter anyref and except_ref.
- DCHECK(type() == wasm::kWasmAnyRef || type() == wasm::kWasmExceptRef);
+  // We use this setter for anyref and exnref.
+ DCHECK(type() == wasm::kWasmAnyRef || type() == wasm::kWasmExnRef);
tagged_buffer().set(offset(), *value);
}
-bool WasmGlobalObject::SetAnyFunc(Isolate* isolate, Handle<Object> value) {
- DCHECK_EQ(type(), wasm::kWasmAnyFunc);
+bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
+ DCHECK_EQ(type(), wasm::kWasmFuncRef);
if (!value->IsNull(isolate) &&
- !WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ !WasmExportedFunction::IsWasmExportedFunction(*value) &&
+ !WasmCapiFunction::IsWasmCapiFunction(*value)) {
return false;
}
tagged_buffer().set(offset(), *value);
@@ -249,6 +251,8 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, imported_mutable_globals_buffers,
OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
kDebugInfoOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, tables, FixedArray, kTablesOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_tables, FixedArray,
+ kIndirectFunctionTablesOffset)
ACCESSORS(WasmInstanceObject, imported_function_refs, FixedArray,
kImportedFunctionRefsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray,
@@ -257,16 +261,10 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, exceptions_table, FixedArray,
kExceptionsTableOffset)
-ACCESSORS(WasmInstanceObject, undefined_value, Oddball, kUndefinedValueOffset)
-ACCESSORS(WasmInstanceObject, null_value, Oddball, kNullValueOffset)
ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_exported_functions, FixedArray,
kWasmExportedFunctionsOffset)
-inline bool WasmInstanceObject::has_indirect_function_table() {
- return indirect_function_table_sig_ids() != nullptr;
-}
-
void WasmInstanceObject::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
@@ -276,10 +274,29 @@ void WasmInstanceObject::clear_padding() {
}
IndirectFunctionTableEntry::IndirectFunctionTableEntry(
- Handle<WasmInstanceObject> instance, int index)
- : instance_(instance), index_(index) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, instance->indirect_function_table_size());
+ Handle<WasmInstanceObject> instance, int table_index, int entry_index)
+ : instance_(table_index == 0 ? instance
+ : Handle<WasmInstanceObject>::null()),
+ table_(table_index != 0
+ ? handle(WasmIndirectFunctionTable::cast(
+ instance->indirect_function_tables().get(
+ table_index)),
+ instance->GetIsolate())
+ : Handle<WasmIndirectFunctionTable>::null()),
+ index_(entry_index) {
+ DCHECK_GE(entry_index, 0);
+ DCHECK_LT(entry_index, table_index == 0
+ ? instance->indirect_function_table_size()
+ : table_->size());
+}
+
+IndirectFunctionTableEntry::IndirectFunctionTableEntry(
+ Handle<WasmIndirectFunctionTable> table, int entry_index)
+ : instance_(Handle<WasmInstanceObject>::null()),
+ table_(table),
+ index_(entry_index) {
+ DCHECK_GE(entry_index, 0);
+ DCHECK_LT(entry_index, table_->size());
}
ImportedFunctionEntry::ImportedFunctionEntry(
@@ -307,6 +324,10 @@ ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
kJumpTableOffsetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
+ACCESSORS(WasmExportedFunctionData, c_wrapper_code, Object, kCWrapperCodeOffset)
+ACCESSORS(WasmExportedFunctionData, wasm_call_target, Smi,
+ kWasmCallTargetOffset)
+SMI_ACCESSORS(WasmExportedFunctionData, packed_args_size, kPackedArgsSizeOffset)
// WasmJSFunction
WasmJSFunction::WasmJSFunction(Address ptr) : JSFunction(ptr) {
@@ -317,6 +338,13 @@ CAST_ACCESSOR(WasmJSFunction)
// WasmJSFunctionData
OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData, Struct)
CAST_ACCESSOR(WasmJSFunctionData)
+SMI_ACCESSORS(WasmJSFunctionData, serialized_return_count,
+ kSerializedReturnCountOffset)
+SMI_ACCESSORS(WasmJSFunctionData, serialized_parameter_count,
+ kSerializedParameterCountOffset)
+ACCESSORS(WasmJSFunctionData, serialized_signature, PodArray<wasm::ValueType>,
+ kSerializedSignatureOffset)
+ACCESSORS(WasmJSFunctionData, callable, JSReceiver, kCallableOffset)
ACCESSORS(WasmJSFunctionData, wrapper_code, Code, kWrapperCodeOffset)
// WasmCapiFunction
@@ -336,6 +364,18 @@ ACCESSORS(WasmCapiFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmCapiFunctionData, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
+// WasmIndirectFunctionTable
+OBJECT_CONSTRUCTORS_IMPL(WasmIndirectFunctionTable, Struct)
+CAST_ACCESSOR(WasmIndirectFunctionTable)
+PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, size, uint32_t, kSizeOffset)
+PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, sig_ids, uint32_t*,
+ kSigIdsOffset)
+PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, targets, Address*,
+ kTargetsOffset)
+OPTIONAL_ACCESSORS(WasmIndirectFunctionTable, managed_native_allocations,
+ Foreign, kManagedNativeAllocationsOffset)
+ACCESSORS(WasmIndirectFunctionTable, refs, FixedArray, kRefsOffset)
+
// WasmDebugInfo
ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 27a56695c2..f44f8326ad 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -139,7 +139,9 @@ class WasmInstanceNativeAllocations {
instance->set_indirect_function_table_refs(*new_refs);
for (uint32_t j = old_size; j < new_size; j++) {
- IndirectFunctionTableEntry(instance, static_cast<int>(j)).clear();
+ // {WasmInstanceNativeAllocations} only manages the memory of table 0.
+ // Therefore we pass the {table_index} as a constant here.
+ IndirectFunctionTableEntry(instance, 0, static_cast<int>(j)).clear();
}
}
uint32_t* indirect_function_table_sig_ids_ = nullptr;
@@ -509,7 +511,7 @@ int WasmModuleObject::GetSourcePosition(Handle<WasmModuleObject> module_object,
Isolate* isolate = module_object->GetIsolate();
const WasmModule* module = module_object->module();
- if (module->origin != wasm::kAsmJsOrigin) {
+ if (module->origin == wasm::kWasmOrigin) {
// for non-asm.js modules, we just add the function's start offset
// to make a module-relative position.
return byte_offset + module_object->GetFunctionOffset(func_index);
@@ -789,19 +791,21 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
backing_store->set(i, null);
}
+ Handle<Object> max;
+ if (has_maximum) {
+ max = isolate->factory()->NewNumberFromUint(maximum);
+ } else {
+ max = isolate->factory()->undefined_value();
+ }
+
Handle<JSFunction> table_ctor(
isolate->native_context()->wasm_table_constructor(), isolate);
auto table_obj = Handle<WasmTableObject>::cast(
isolate->factory()->NewJSObject(table_ctor));
+ DisallowHeapAllocation no_gc;
table_obj->set_raw_type(static_cast<int>(type));
table_obj->set_entries(*backing_store);
- Handle<Object> max;
- if (has_maximum) {
- max = isolate->factory()->NewNumberFromUint(maximum);
- } else {
- max = isolate->factory()->undefined_value();
- }
table_obj->set_maximum_length(*max);
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
@@ -865,15 +869,14 @@ int WasmTableObject::Grow(Isolate* isolate, Handle<WasmTableObject> table,
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- continue;
- }
- // For Table 0 we have to update the indirect function table.
+
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
- DCHECK_EQ(old_size, instance->indirect_function_table_size());
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(instance,
- new_size);
+
+ DCHECK_EQ(old_size, WasmInstanceObject::IndirectFunctionTableSize(
+ isolate, instance, table_index));
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, table_index, new_size);
}
for (uint32_t entry = old_size; entry < new_size; ++entry) {
@@ -895,10 +898,11 @@ bool WasmTableObject::IsValidElement(Isolate* isolate,
Handle<Object> entry) {
// Anyref tables take everything.
if (table->type() == wasm::kWasmAnyRef) return true;
- // Anyfunc tables can store {null} or {WasmExportedFunction} or
- // {WasmCapiFunction} objects.
+ // FuncRef tables can store {null}, {WasmExportedFunction}, {WasmJSFunction},
+ // or {WasmCapiFunction} objects.
if (entry->IsNull(isolate)) return true;
return WasmExportedFunction::IsWasmExportedFunction(*entry) ||
+ WasmJSFunction::IsWasmJSFunction(*entry) ||
WasmCapiFunction::IsWasmCapiFunction(*entry);
}
@@ -932,6 +936,9 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
DCHECK_NOT_NULL(wasm_function->sig);
UpdateDispatchTables(isolate, table, entry_index, wasm_function->sig,
target_instance, func_index);
+ } else if (WasmJSFunction::IsWasmJSFunction(*entry)) {
+ UpdateDispatchTables(isolate, table, entry_index,
+ Handle<WasmJSFunction>::cast(entry));
} else {
DCHECK(WasmCapiFunction::IsWasmCapiFunction(*entry));
UpdateDispatchTables(isolate, table, entry_index,
@@ -955,7 +962,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
// First we handle the easy anyref table case.
if (table->type() == wasm::kWasmAnyRef) return entry;
- // Now we handle the anyfunc case.
+ // Now we handle the funcref case.
if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
WasmCapiFunction::IsWasmCapiFunction(*entry)) {
return entry;
@@ -1005,11 +1012,6 @@ void WasmTableObject::UpdateDispatchTables(
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- // Only table 0 has a dispatch table in the instance at the moment.
- // TODO(ahaas): Introduce dispatch tables for the other tables as well.
- continue;
- }
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
@@ -1017,11 +1019,33 @@ void WasmTableObject::UpdateDispatchTables(
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
auto sig_id = instance->module()->signature_map.Find(*sig);
- IndirectFunctionTableEntry(instance, entry_index)
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, target_instance, target_func_index);
}
}
+void WasmTableObject::UpdateDispatchTables(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int entry_index,
+ Handle<WasmJSFunction> function) {
+ // We simply need to update the IFTs for each instance that imports
+ // this table.
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
+
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ int table_index =
+ Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset)),
+ isolate);
+ WasmInstanceObject::ImportWasmJSFunctionIntoTable(
+ isolate, instance, table_index, entry_index, function);
+ }
+}
+
void WasmTableObject::UpdateDispatchTables(
Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
Handle<WasmCapiFunction> capi_function) {
@@ -1052,11 +1076,6 @@ void WasmTableObject::UpdateDispatchTables(
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- // Only table 0 has a dispatch table in the instance at the moment.
- // TODO(ahaas): Introduce dispatch tables for the other tables as well.
- continue;
- }
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
@@ -1077,7 +1096,7 @@ void WasmTableObject::UpdateDispatchTables(
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
auto sig_id = instance->module()->signature_map.Find(sig);
- IndirectFunctionTableEntry(instance, entry_index)
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, wasm_code->instruction_start(), *tuple);
}
}
@@ -1091,16 +1110,13 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate,
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- // Only table 0 has a dispatch table in the instance at the moment.
- continue;
- }
Handle<WasmInstanceObject> target_instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
isolate);
- DCHECK_LT(index, target_instance->indirect_function_table_size());
- IndirectFunctionTableEntry(target_instance, index).clear();
+ DCHECK_LT(index, WasmInstanceObject::IndirectFunctionTableSize(
+ isolate, target_instance, table_index));
+ IndirectFunctionTableEntry(target_instance, table_index, index).clear();
}
}
@@ -1118,8 +1134,8 @@ void WasmTableObject::SetFunctionTablePlaceholder(
void WasmTableObject::GetFunctionTableEntry(
Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
bool* is_valid, bool* is_null, MaybeHandle<WasmInstanceObject>* instance,
- int* function_index) {
- DCHECK_EQ(table->type(), wasm::kWasmAnyFunc);
+ int* function_index, MaybeHandle<WasmJSFunction>* maybe_js_function) {
+ DCHECK_EQ(table->type(), wasm::kWasmFuncRef);
DCHECK_LT(entry_index, table->entries().length());
// We initialize {is_valid} with {true}. We may change it later.
*is_valid = true;
@@ -1132,17 +1148,91 @@ void WasmTableObject::GetFunctionTableEntry(
auto target_func = Handle<WasmExportedFunction>::cast(element);
*instance = handle(target_func->instance(), isolate);
*function_index = target_func->function_index();
+ *maybe_js_function = MaybeHandle<WasmJSFunction>();
+ return;
+ }
+ if (WasmJSFunction::IsWasmJSFunction(*element)) {
+ *instance = MaybeHandle<WasmInstanceObject>();
+ *maybe_js_function = Handle<WasmJSFunction>::cast(element);
return;
- } else if (element->IsTuple2()) {
+ }
+ if (element->IsTuple2()) {
auto tuple = Handle<Tuple2>::cast(element);
*instance = handle(WasmInstanceObject::cast(tuple->value1()), isolate);
*function_index = Smi::cast(tuple->value2()).value();
+ *maybe_js_function = MaybeHandle<WasmJSFunction>();
return;
}
*is_valid = false;
}
namespace {
+class IftNativeAllocations {
+ public:
+ IftNativeAllocations(Handle<WasmIndirectFunctionTable> table, uint32_t size)
+ : sig_ids_(size), targets_(size) {
+ table->set_sig_ids(sig_ids_.data());
+ table->set_targets(targets_.data());
+ }
+
+ static size_t SizeInMemory(uint32_t size) {
+ return size * (sizeof(Address) + sizeof(uint32_t));
+ }
+
+ void resize(Handle<WasmIndirectFunctionTable> table, uint32_t new_size) {
+ DCHECK_GE(new_size, sig_ids_.size());
+ DCHECK_EQ(this, Managed<IftNativeAllocations>::cast(
+ table->managed_native_allocations())
+ .raw());
+ sig_ids_.resize(new_size);
+ targets_.resize(new_size);
+ table->set_sig_ids(sig_ids_.data());
+ table->set_targets(targets_.data());
+ }
+
+ private:
+ std::vector<uint32_t> sig_ids_;
+ std::vector<Address> targets_;
+};
+} // namespace
+
+Handle<WasmIndirectFunctionTable> WasmIndirectFunctionTable::New(
+ Isolate* isolate, uint32_t size) {
+ auto refs = isolate->factory()->NewFixedArray(static_cast<int>(size));
+ auto table = Handle<WasmIndirectFunctionTable>::cast(
+ isolate->factory()->NewStruct(WASM_INDIRECT_FUNCTION_TABLE_TYPE));
+ table->set_size(size);
+ table->set_refs(*refs);
+ auto native_allocations = Managed<IftNativeAllocations>::Allocate(
+ isolate, IftNativeAllocations::SizeInMemory(size), table, size);
+ table->set_managed_native_allocations(*native_allocations);
+ for (uint32_t i = 0; i < size; ++i) {
+ IndirectFunctionTableEntry(table, static_cast<int>(i)).clear();
+ }
+ return table;
+}
+
+void WasmIndirectFunctionTable::Resize(Isolate* isolate,
+ Handle<WasmIndirectFunctionTable> table,
+ uint32_t new_size) {
+ uint32_t old_size = table->size();
+ if (old_size >= new_size) return; // Nothing to do.
+
+ Managed<IftNativeAllocations>::cast(table->managed_native_allocations())
+ .raw()
+ ->resize(table, new_size);
+
+ Handle<FixedArray> old_refs(table->refs(), isolate);
+ Handle<FixedArray> new_refs = isolate->factory()->CopyFixedArrayAndGrow(
+ old_refs, static_cast<int>(new_size - old_size));
+ table->set_refs(*new_refs);
+ table->set_size(new_size);
+ for (uint32_t i = old_size; i < new_size; ++i) {
+ IndirectFunctionTableEntry(table, static_cast<int>(i)).clear();
+ }
+}
+
+namespace {
bool AdjustBufferPermissions(Isolate* isolate, Handle<JSArrayBuffer> old_buffer,
size_t new_size) {
if (new_size > old_buffer->allocation_length()) return false;
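WasmIndirectFunctionTable keeps its sig_ids and targets in off-heap vectors owned by IftNativeAllocations, so Resize has to grow both the native vectors and the on-heap refs array. A minimal usage sketch of the two entry points defined above (not part of the patch), assuming an existing Isolate* isolate:

Handle<WasmIndirectFunctionTable> table =
    WasmIndirectFunctionTable::New(isolate, 4);        // 4 cleared entries
WasmIndirectFunctionTable::Resize(isolate, table, 8);  // grows; no-op if new size <= old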
@@ -1380,6 +1470,15 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
isolate->native_context()->wasm_global_constructor(), isolate);
auto global_obj = Handle<WasmGlobalObject>::cast(
isolate->factory()->NewJSObject(global_ctor));
+ {
+ // Disallow GC until all fields have acceptable types.
+ DisallowHeapAllocation no_gc;
+
+ global_obj->set_flags(0);
+ global_obj->set_type(type);
+ global_obj->set_offset(offset);
+ global_obj->set_is_mutable(is_mutable);
+ }
if (wasm::ValueTypes::IsReferenceType(type)) {
DCHECK(maybe_untagged_buffer.is_null());
@@ -1412,19 +1511,24 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
global_obj->set_untagged_buffer(*untagged_buffer);
}
- global_obj->set_flags(0);
- global_obj->set_type(type);
- global_obj->set_offset(offset);
- global_obj->set_is_mutable(is_mutable);
return global_obj;
}
void IndirectFunctionTableEntry::clear() {
- instance_->indirect_function_table_sig_ids()[index_] = -1;
- instance_->indirect_function_table_targets()[index_] = 0;
- instance_->indirect_function_table_refs().set(
- index_, ReadOnlyRoots(instance_->GetIsolate()).undefined_value());
+ if (!instance_.is_null()) {
+ instance_->indirect_function_table_sig_ids()[index_] = -1;
+ instance_->indirect_function_table_targets()[index_] = 0;
+ instance_->indirect_function_table_refs().set(
+ index_, ReadOnlyRoots(instance_->GetIsolate()).undefined_value());
+ } else {
+ DCHECK(!table_.is_null());
+ table_->sig_ids()[index_] = -1;
+ table_->targets()[index_] = 0;
+ table_->refs().set(
+ index_,
+ ReadOnlyRoots(GetIsolateFromWritableObject(*table_)).undefined_value());
+ }
}
void IndirectFunctionTableEntry::Set(int sig_id,
@@ -1455,31 +1559,34 @@ void IndirectFunctionTableEntry::Set(int sig_id,
void IndirectFunctionTableEntry::Set(int sig_id, Address call_target,
Object ref) {
- instance_->indirect_function_table_sig_ids()[index_] = sig_id;
- instance_->indirect_function_table_targets()[index_] = call_target;
- instance_->indirect_function_table_refs().set(index_, ref);
-}
-
-Object IndirectFunctionTableEntry::object_ref() {
- return instance_->indirect_function_table_refs().get(index_);
+ if (!instance_.is_null()) {
+ instance_->indirect_function_table_sig_ids()[index_] = sig_id;
+ instance_->indirect_function_table_targets()[index_] = call_target;
+ instance_->indirect_function_table_refs().set(index_, ref);
+ } else {
+ DCHECK(!table_.is_null());
+ table_->sig_ids()[index_] = sig_id;
+ table_->targets()[index_] = call_target;
+ table_->refs().set(index_, ref);
+ }
}
-int IndirectFunctionTableEntry::sig_id() {
- return instance_->indirect_function_table_sig_ids()[index_];
+Object IndirectFunctionTableEntry::object_ref() const {
+ return !instance_.is_null()
+ ? instance_->indirect_function_table_refs().get(index_)
+ : table_->refs().get(index_);
}
-Address IndirectFunctionTableEntry::target() {
- return instance_->indirect_function_table_targets()[index_];
+int IndirectFunctionTableEntry::sig_id() const {
+ return !instance_.is_null()
+ ? instance_->indirect_function_table_sig_ids()[index_]
+ : table_->sig_ids()[index_];
}
-void IndirectFunctionTableEntry::CopyFrom(
- const IndirectFunctionTableEntry& that) {
- instance_->indirect_function_table_sig_ids()[index_] =
- that.instance_->indirect_function_table_sig_ids()[that.index_];
- instance_->indirect_function_table_targets()[index_] =
- that.instance_->indirect_function_table_targets()[that.index_];
- instance_->indirect_function_table_refs().set(
- index_, that.instance_->indirect_function_table_refs().get(that.index_));
+Address IndirectFunctionTableEntry::target() const {
+ return !instance_.is_null()
+ ? instance_->indirect_function_table_targets()[index_]
+ : table_->targets()[index_];
}
void ImportedFunctionEntry::SetWasmToJs(
@@ -1535,11 +1642,21 @@ constexpr uint16_t WasmInstanceObject::kTaggedFieldOffsets[];
// static
bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- Handle<WasmInstanceObject> instance, uint32_t minimum_size) {
+ Handle<WasmInstanceObject> instance, int table_index,
+ uint32_t minimum_size) {
+ Isolate* isolate = instance->GetIsolate();
+ if (table_index > 0) {
+ DCHECK_LT(table_index, instance->indirect_function_tables().length());
+ auto table =
+ handle(WasmIndirectFunctionTable::cast(
+ instance->indirect_function_tables().get(table_index)),
+ isolate);
+ WasmIndirectFunctionTable::Resize(isolate, table, minimum_size);
+ return true;
+ }
uint32_t old_size = instance->indirect_function_table_size();
if (old_size >= minimum_size) return false; // Nothing to do.
- Isolate* isolate = instance->GetIsolate();
HandleScope scope(isolate);
auto native_allocations = GetNativeAllocations(*instance);
native_allocations->resize_indirect_function_table(isolate, instance,
@@ -1624,8 +1741,6 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
instance->set_indirect_function_table_targets(nullptr);
instance->set_native_context(*isolate->native_context());
instance->set_module_object(*module_object);
- instance->set_undefined_value(ReadOnlyRoots(isolate).undefined_value());
- instance->set_null_value(ReadOnlyRoots(isolate).null_value());
instance->set_jump_table_start(
module_object->native_module()->jump_table_start());
@@ -1695,83 +1810,55 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
return native_module->GetCallTargetForFunction(func_index);
}
-namespace {
-void CopyTableEntriesImpl(Handle<WasmInstanceObject> instance, uint32_t dst,
- uint32_t src, uint32_t count, bool copy_backward) {
- DCHECK(IsInBounds(dst, count, instance->indirect_function_table_size()));
- if (copy_backward) {
- for (uint32_t i = count; i > 0; i--) {
- auto to_entry = IndirectFunctionTableEntry(instance, dst + i - 1);
- auto from_entry = IndirectFunctionTableEntry(instance, src + i - 1);
- to_entry.CopyFrom(from_entry);
- }
- } else {
- for (uint32_t i = 0; i < count; i++) {
- auto to_entry = IndirectFunctionTableEntry(instance, dst + i);
- auto from_entry = IndirectFunctionTableEntry(instance, src + i);
- to_entry.CopyFrom(from_entry);
- }
+int WasmInstanceObject::IndirectFunctionTableSize(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t table_index) {
+ if (table_index == 0) {
+ return instance->indirect_function_table_size();
}
+ auto table =
+ handle(WasmIndirectFunctionTable::cast(
+ instance->indirect_function_tables().get(table_index)),
+ isolate);
+ return table->size();
}
-} // namespace
// static
bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
Handle<WasmInstanceObject> instance,
- uint32_t table_src_index,
uint32_t table_dst_index,
+ uint32_t table_src_index,
uint32_t dst, uint32_t src,
uint32_t count) {
- if (static_cast<int>(table_dst_index) >= instance->tables().length()) {
- return false;
- }
- if (static_cast<int>(table_src_index) >= instance->tables().length()) {
- return false;
- }
-
- // TODO(titzer): multiple tables in TableCopy
- CHECK_EQ(0, table_src_index);
- CHECK_EQ(0, table_dst_index);
- auto max = instance->indirect_function_table_size();
- bool copy_backward = src < dst && dst - src < count;
- bool ok = ClampToBounds(dst, &count, max);
+ // Copying 0 elements is a no-op.
+ if (count == 0) return true;
+ CHECK_LT(table_dst_index, instance->tables().length());
+ CHECK_LT(table_src_index, instance->tables().length());
+ auto table_dst = handle(
+ WasmTableObject::cast(instance->tables().get(table_dst_index)), isolate);
+ auto table_src = handle(
+ WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
+ uint32_t max_dst = static_cast<uint32_t>(table_dst->entries().length());
+ uint32_t max_src = static_cast<uint32_t>(table_src->entries().length());
+ bool copy_backward = src < dst;
+ bool ok = ClampToBounds(dst, &count, max_dst);
// Use & instead of && so the clamp is not short-circuited.
- ok &= ClampToBounds(src, &count, max);
+ ok &= ClampToBounds(src, &count, max_src);
// If performing a partial copy when copying backward, then the first access
// will be out-of-bounds, so no entries should be copied.
if (copy_backward && !ok) return ok;
- if (dst == src || count == 0) return ok; // no-op
-
- // TODO(titzer): multiple tables in TableCopy
- auto table = handle(
- WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
- // Broadcast table copy operation to all instances that import this table.
- Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
- for (int i = 0; i < dispatch_tables->length();
- i += kDispatchTableNumElements) {
- Handle<WasmInstanceObject> target_instance(
- WasmInstanceObject::cast(
- dispatch_tables->get(i + kDispatchTableInstanceOffset)),
- isolate);
- CopyTableEntriesImpl(target_instance, dst, src, count, copy_backward);
+ // no-op
+ if ((dst == src && table_dst_index == table_src_index) || count == 0) {
+ return ok;
}
- // Copy the function entries.
- auto dst_table = handle(
- WasmTableObject::cast(instance->tables().get(table_dst_index)), isolate);
- auto src_table = handle(
- WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
- if (copy_backward) {
- for (uint32_t i = count; i > 0; i--) {
- dst_table->entries().set(dst + i - 1,
- src_table->entries().get(src + i - 1));
- }
- } else {
- for (uint32_t i = 0; i < count; i++) {
- dst_table->entries().set(dst + i, src_table->entries().get(src + i));
- }
+ for (uint32_t i = 0; i < count; ++i) {
+ uint32_t src_index = copy_backward ? (src + count - i - 1) : src + i;
+ uint32_t dst_index = copy_backward ? (dst + count - i - 1) : dst + i;
+ auto value = WasmTableObject::Get(isolate, table_src, src_index);
+ WasmTableObject::Set(isolate, table_dst, dst_index, value);
}
return ok;
}
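CopyTableEntries now copies element by element through WasmTableObject::Get/Set, iterating from the end whenever src < dst so that overlapping ranges are not clobbered. A standalone sketch of just that indexing (not part of the patch), using a plain std::vector instead of the table objects:

#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the loop above: when the ranges overlap with src < dst, copying
// from the end avoids reading entries that were already overwritten.
void CopyEntries(std::vector<int>& table, uint32_t dst, uint32_t src,
                 uint32_t count) {
  bool copy_backward = src < dst;
  for (uint32_t i = 0; i < count; ++i) {
    uint32_t src_index = copy_backward ? src + count - i - 1 : src + i;
    uint32_t dst_index = copy_backward ? dst + count - i - 1 : dst + i;
    table[dst_index] = table[src_index];
  }
}

int main() {
  std::vector<int> t = {10, 11, 12, 13, 14};
  CopyEntries(t, /*dst=*/1, /*src=*/0, /*count=*/3);  // overlapping copy
  for (int v : t) std::printf("%d ", v);              // prints: 10 10 11 12 14
  std::printf("\n");
  return 0;
}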
@@ -1782,6 +1869,8 @@ bool WasmInstanceObject::InitTableEntries(Isolate* isolate,
uint32_t table_index,
uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
+ // Copying 0 elements is a no-op.
+ if (count == 0) return true;
// Note that this implementation just calls through to module instantiation.
// This is intentional, so that the runtime only depends on the object
// methods, and not the module instantiation logic.
@@ -1830,9 +1919,8 @@ WasmInstanceObject::GetOrCreateWasmExportedFunction(
// The wrapper may not exist yet if no function in the exports section has
// this signature. We compile it and store the wrapper in the module for
// later use.
- wrapper = compiler::CompileJSToWasmWrapper(isolate, function.sig,
- function.imported)
- .ToHandleChecked();
+ wrapper = wasm::JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ isolate, function.sig, function.imported);
module_object->export_wrappers().set(wrapper_index, *wrapper);
}
result = WasmExportedFunction::New(
@@ -1861,6 +1949,55 @@ void WasmInstanceObject::SetWasmExportedFunction(
}
// static
+void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int table_index,
+ int entry_index, Handle<WasmJSFunction> js_function) {
+ // Deserialize the signature encapsulated with the {WasmJSFunction}.
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ wasm::FunctionSig* sig = js_function->GetSignature(&zone);
+ auto sig_id = instance->module()->signature_map.Find(*sig);
+
+ // Compile a wrapper for the target callable.
+ Handle<JSReceiver> callable(js_function->GetCallable(), isolate);
+ wasm::WasmCodeRefScope code_ref_scope;
+ Address call_target = kNullAddress;
+ if (sig_id >= 0) {
+ wasm::NativeModule* native_module =
+ instance->module_object().native_module();
+ // TODO(mstarzinger): Cache and reuse wrapper code.
+ const wasm::WasmFeatures enabled = native_module->enabled_features();
+ auto resolved =
+ compiler::ResolveWasmImportCall(callable, sig, enabled.bigint);
+ compiler::WasmImportCallKind kind = resolved.first;
+ callable = resolved.second; // Update to ultimate target.
+ DCHECK_NE(compiler::WasmImportCallKind::kLinkError, kind);
+ wasm::CompilationEnv env = native_module->CreateCompilationEnv();
+ wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
+ isolate->wasm_engine(), &env, kind, sig, false);
+ std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
+ std::move(result.source_positions), GetCodeKind(result),
+ wasm::ExecutionTier::kNone);
+ wasm::WasmCode* published_code =
+ native_module->PublishCode(std::move(wasm_code));
+ isolate->counters()->wasm_generated_code_size()->Increment(
+ published_code->instructions().length());
+ isolate->counters()->wasm_reloc_size()->Increment(
+ published_code->reloc_info().length());
+ call_target = published_code->instruction_start();
+ }
+
+ // Update the dispatch table.
+ Handle<Tuple2> tuple =
+ isolate->factory()->NewTuple2(instance, callable, AllocationType::kOld);
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
+ .Set(sig_id, call_target, *tuple);
+}
+
+// static
Handle<WasmExceptionObject> WasmExceptionObject::New(
Isolate* isolate, const wasm::FunctionSig* sig,
Handle<HeapObject> exception_tag) {
@@ -2013,8 +2150,8 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
encoded_size += 8;
break;
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
encoded_size += 1;
break;
default:
@@ -2080,10 +2217,10 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
int num_imported_functions = instance->module()->num_imported_functions;
int jump_table_offset = -1;
if (func_index >= num_imported_functions) {
- ptrdiff_t jump_table_diff =
- instance->module_object().native_module()->jump_table_offset(
+ uint32_t jump_table_diff =
+ instance->module_object().native_module()->GetJumpTableOffset(
func_index);
- DCHECK(jump_table_diff >= 0 && jump_table_diff <= INT_MAX);
+ DCHECK_GE(kMaxInt, jump_table_diff);
jump_table_offset = static_cast<int>(jump_table_diff);
}
Handle<WasmExportedFunctionData> function_data =
@@ -2093,9 +2230,13 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
function_data->set_instance(*instance);
function_data->set_jump_table_offset(jump_table_offset);
function_data->set_function_index(func_index);
+ function_data->set_c_wrapper_code(Smi::zero(), SKIP_WRITE_BARRIER);
+ function_data->set_wasm_call_target(Smi::zero(), SKIP_WRITE_BARRIER);
+ function_data->set_packed_args_size(0);
MaybeHandle<String> maybe_name;
- if (instance->module()->origin == wasm::kAsmJsOrigin) {
+ bool is_asm_js_module = instance->module_object().is_asm_js();
+ if (is_asm_js_module) {
// We can use the function name only for asm.js. For WebAssembly, the
// function name is specified as the function_index.toString().
maybe_name = WasmModuleObject::GetFunctionNameOrNull(
@@ -2110,10 +2251,18 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
Vector<uint8_t>::cast(buffer.SubVector(0, length)))
.ToHandleChecked();
}
- bool is_asm_js_module = instance->module_object().is_asm_js();
- Handle<Map> function_map = is_asm_js_module
- ? isolate->sloppy_function_map()
- : isolate->wasm_exported_function_map();
+ Handle<Map> function_map;
+ switch (instance->module()->origin) {
+ case wasm::kWasmOrigin:
+ function_map = isolate->wasm_exported_function_map();
+ break;
+ case wasm::kAsmJsSloppyOrigin:
+ function_map = isolate->sloppy_function_map();
+ break;
+ case wasm::kAsmJsStrictOrigin:
+ function_map = isolate->strict_function_map();
+ break;
+ }
NewFunctionArgs args =
NewFunctionArgs::ForWasm(name, function_data, function_map);
Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
@@ -2143,9 +2292,22 @@ bool WasmJSFunction::IsWasmJSFunction(Object object) {
Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
wasm::FunctionSig* sig,
Handle<JSReceiver> callable) {
+ DCHECK_LE(sig->all().size(), kMaxInt);
+ int sig_size = static_cast<int>(sig->all().size());
+ int return_count = static_cast<int>(sig->return_count());
+ int parameter_count = static_cast<int>(sig->parameter_count());
+ Handle<PodArray<wasm::ValueType>> serialized_sig =
+ PodArray<wasm::ValueType>::New(isolate, sig_size, AllocationType::kOld);
+ if (sig_size > 0) {
+ serialized_sig->copy_in(0, sig->all().begin(), sig_size);
+ }
Handle<WasmJSFunctionData> function_data =
Handle<WasmJSFunctionData>::cast(isolate->factory()->NewStruct(
WASM_JS_FUNCTION_DATA_TYPE, AllocationType::kOld));
+ function_data->set_serialized_return_count(return_count);
+ function_data->set_serialized_parameter_count(parameter_count);
+ function_data->set_serialized_signature(*serialized_sig);
+ function_data->set_callable(*callable);
// TODO(7742): Make this callable by using a proper wrapper code.
function_data->set_wrapper_code(
isolate->builtins()->builtin(Builtins::kIllegal));
@@ -2160,6 +2322,37 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
return Handle<WasmJSFunction>::cast(js_function);
}
+JSReceiver WasmJSFunction::GetCallable() const {
+ return shared().wasm_js_function_data().callable();
+}
+
+wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
+ WasmJSFunctionData function_data = shared().wasm_js_function_data();
+ int sig_size = function_data.serialized_signature().length();
+ wasm::ValueType* types = zone->NewArray<wasm::ValueType>(sig_size);
+ if (sig_size > 0) {
+ function_data.serialized_signature().copy_out(0, types, sig_size);
+ }
+ int return_count = function_data.serialized_return_count();
+ int parameter_count = function_data.serialized_parameter_count();
+ return new (zone) wasm::FunctionSig(return_count, parameter_count, types);
+}
+
+bool WasmJSFunction::MatchesSignature(wasm::FunctionSig* sig) {
+ DCHECK_LE(sig->all().size(), kMaxInt);
+ int sig_size = static_cast<int>(sig->all().size());
+ int return_count = static_cast<int>(sig->return_count());
+ int parameter_count = static_cast<int>(sig->parameter_count());
+ WasmJSFunctionData function_data = shared().wasm_js_function_data();
+ if (return_count != function_data.serialized_return_count() ||
+ parameter_count != function_data.serialized_parameter_count()) {
+ return false;
+ }
+ if (sig_size == 0) return true; // Prevent undefined behavior.
+ const wasm::ValueType* expected = sig->all().begin();
+ return function_data.serialized_signature().matches(expected, sig_size);
+}
+
Address WasmCapiFunction::GetHostCallTarget() const {
return shared().wasm_capi_function_data().call_target();
}
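WasmJSFunctionData now stores the signature flattened into a PodArray (return types followed by parameter types) together with the two counts; GetSignature rebuilds a FunctionSig from that array in a Zone, and MatchesSignature compares the counts and then the raw type array. A standalone sketch of that layout with hypothetical minimal types (not the V8 ones):

#include <vector>

struct SerializedSig {
  int return_count;
  int parameter_count;
  std::vector<int> types;  // return types first, then parameter types
};

// Mirrors MatchesSignature above: compare the counts, then the flattened types.
bool Matches(const SerializedSig& stored, int return_count, int parameter_count,
             const std::vector<int>& all_types) {
  if (return_count != stored.return_count ||
      parameter_count != stored.parameter_count) {
    return false;
  }
  return all_types == stored.types;
}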
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 1e6ced0b76..1200f7040a 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -40,15 +40,17 @@ class SeqOneByteString;
class WasmCapiFunction;
class WasmDebugInfo;
class WasmExceptionTag;
+class WasmExportedFunction;
class WasmInstanceObject;
+class WasmJSFunction;
class WasmModuleObject;
-class WasmExportedFunction;
+class WasmIndirectFunctionTable;
template <class CppType>
class Managed;
#define DECL_OPTIONAL_ACCESSORS(name, type) \
- V8_INLINE bool has_##name(); \
+ DECL_GETTER(has_##name, bool) \
DECL_ACCESSORS(name, type)
// A helper for an entry in an indirect function table (IFT).
@@ -60,7 +62,11 @@ class Managed;
// - target = entrypoint to Wasm code or import wrapper code
class IndirectFunctionTableEntry {
public:
- inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int index);
+ inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int table_index,
+ int entry_index);
+
+ inline IndirectFunctionTableEntry(Handle<WasmIndirectFunctionTable> table,
+ int entry_index);
void clear();
V8_EXPORT_PRIVATE void Set(int sig_id,
@@ -68,14 +74,13 @@ class IndirectFunctionTableEntry {
int target_func_index);
void Set(int sig_id, Address call_target, Object ref);
- void CopyFrom(const IndirectFunctionTableEntry& that);
-
- Object object_ref();
- int sig_id();
- Address target();
+ Object object_ref() const;
+ int sig_id() const;
+ Address target() const;
private:
Handle<WasmInstanceObject> const instance_;
+ Handle<WasmIndirectFunctionTable> const table_;
int const index_;
};
@@ -292,6 +297,7 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
static void Fill(Isolate* isolate, Handle<WasmTableObject> table,
uint32_t start, Handle<Object> entry, uint32_t count);
+ // TODO(mstarzinger): Unify these three methods into one.
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int entry_index, wasm::FunctionSig* sig,
@@ -300,6 +306,10 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int entry_index,
+ Handle<WasmJSFunction> function);
+ static void UpdateDispatchTables(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int entry_index,
Handle<WasmCapiFunction> capi_function);
static void ClearDispatchTables(Isolate* isolate,
@@ -312,14 +322,12 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
int func_index);
// This function reads the content of a function table entry and returns it
- // through the out parameters {is_valid}, {is_null}, {instance}, and
- // {function_index}.
- static void GetFunctionTableEntry(Isolate* isolate,
- Handle<WasmTableObject> table,
- int entry_index, bool* is_valid,
- bool* is_null,
- MaybeHandle<WasmInstanceObject>* instance,
- int* function_index);
+ // through the out parameters {is_valid}, {is_null}, {instance},
+ // {function_index}, and {maybe_js_function}.
+ static void GetFunctionTableEntry(
+ Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
+ bool* is_valid, bool* is_null, MaybeHandle<WasmInstanceObject>* instance,
+ int* function_index, MaybeHandle<WasmJSFunction>* maybe_js_function);
OBJECT_CONSTRUCTORS(WasmTableObject, JSObject);
};
@@ -406,7 +414,7 @@ class WasmGlobalObject : public JSObject {
inline void SetF32(float value);
inline void SetF64(double value);
inline void SetAnyRef(Handle<Object> value);
- inline bool SetAnyFunc(Isolate* isolate, Handle<Object> value);
+ inline bool SetFuncRef(Isolate* isolate, Handle<Object> value);
private:
// This function returns the address of the global's data in the
@@ -431,12 +439,11 @@ class WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(imported_mutable_globals_buffers, FixedArray)
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(tables, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(indirect_function_tables, FixedArray)
DECL_ACCESSORS(imported_function_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
DECL_OPTIONAL_ACCESSORS(exceptions_table, FixedArray)
- DECL_ACCESSORS(undefined_value, Oddball)
- DECL_ACCESSORS(null_value, Oddball)
DECL_ACCESSORS(centry_stub, Code)
DECL_OPTIONAL_ACCESSORS(wasm_exported_functions, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
@@ -482,7 +489,6 @@ class WasmInstanceObject : public JSObject {
V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
V(kGlobalsStartOffset, kSystemPointerSize) \
V(kImportedMutableGlobalsOffset, kSystemPointerSize) \
- V(kUndefinedValueOffset, kTaggedSize) \
V(kIsolateRootOffset, kSystemPointerSize) \
V(kJumpTableStartOffset, kSystemPointerSize) \
/* End of often-accessed fields. */ \
@@ -495,9 +501,9 @@ class WasmInstanceObject : public JSObject {
V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \
V(kDebugInfoOffset, kTaggedSize) \
V(kTablesOffset, kTaggedSize) \
+ V(kIndirectFunctionTablesOffset, kTaggedSize) \
V(kManagedNativeAllocationsOffset, kTaggedSize) \
V(kExceptionsTableOffset, kTaggedSize) \
- V(kNullValueOffset, kTaggedSize) \
V(kCEntryStubOffset, kTaggedSize) \
V(kWasmExportedFunctionsOffset, kTaggedSize) \
V(kRealStackLimitAddressOffset, kSystemPointerSize) \
@@ -526,7 +532,6 @@ class WasmInstanceObject : public JSObject {
static constexpr uint16_t kTaggedFieldOffsets[] = {
kImportedFunctionRefsOffset,
kIndirectFunctionTableRefsOffset,
- kUndefinedValueOffset,
kModuleObjectOffset,
kExportsObjectOffset,
kNativeContextOffset,
@@ -536,18 +541,17 @@ class WasmInstanceObject : public JSObject {
kImportedMutableGlobalsBuffersOffset,
kDebugInfoOffset,
kTablesOffset,
+ kIndirectFunctionTablesOffset,
kManagedNativeAllocationsOffset,
kExceptionsTableOffset,
- kNullValueOffset,
kCEntryStubOffset,
kWasmExportedFunctionsOffset};
V8_EXPORT_PRIVATE const wasm::WasmModule* module();
V8_EXPORT_PRIVATE static bool EnsureIndirectFunctionTableWithMinimumSize(
- Handle<WasmInstanceObject> instance, uint32_t minimum_size);
-
- bool has_indirect_function_table();
+ Handle<WasmInstanceObject> instance, int table_index,
+ uint32_t minimum_size);
V8_EXPORT_PRIVATE void SetRawMemory(byte* mem_start, size_t mem_size);
@@ -561,11 +565,15 @@ class WasmInstanceObject : public JSObject {
Address GetCallTarget(uint32_t func_index);
+ static int IndirectFunctionTableSize(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t table_index);
+
// Copies table entries. Returns {false} if the ranges are out-of-bounds.
static bool CopyTableEntries(Isolate* isolate,
Handle<WasmInstanceObject> instance,
- uint32_t table_src_index,
- uint32_t table_dst_index, uint32_t dst,
+ uint32_t table_dst_index,
+ uint32_t table_src_index, uint32_t dst,
uint32_t src,
uint32_t count) V8_WARN_UNUSED_RESULT;
@@ -597,6 +605,14 @@ class WasmInstanceObject : public JSObject {
int index,
Handle<WasmExportedFunction> val);
+ // Imports a constructed {WasmJSFunction} into the indirect function table of
+ // this instance. Note that this might trigger wrapper compilation, since a
+ // {WasmJSFunction} is instance-independent and just wraps a JS callable.
+ static void ImportWasmJSFunctionIntoTable(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ int table_index, int entry_index,
+ Handle<WasmJSFunction> js_function);
+
OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject);
private:
@@ -681,6 +697,12 @@ class WasmJSFunction : public JSFunction {
static Handle<WasmJSFunction> New(Isolate* isolate, wasm::FunctionSig* sig,
Handle<JSReceiver> callable);
+ JSReceiver GetCallable() const;
+  // Deserializes the signature of this function using the provided zone. Note
+  // that the lifetime of the signature is hence directly coupled to the zone.
+ wasm::FunctionSig* GetSignature(Zone* zone);
+ bool MatchesSignature(wasm::FunctionSig* sig);
+
DECL_CAST(WasmJSFunction)
OBJECT_CONSTRUCTORS(WasmJSFunction, JSFunction);
};
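
GetSignature is documented above as allocating the deserialized FunctionSig in the caller's Zone, so the signature lives exactly as long as that zone. A small standalone arena sketch (illustrative only, not V8's Zone) of why such a pointer must not outlive its allocator:

#include <cstddef>
#include <memory>
#include <new>
#include <vector>

// Toy arena: hands out blocks it owns and frees them all in its destructor.
class Arena {
 public:
  void* Allocate(size_t bytes) {
    blocks_.push_back(std::make_unique<char[]>(bytes));
    return blocks_.back().get();
  }

 private:
  std::vector<std::unique_ptr<char[]>> blocks_;
};

struct Signature {
  int return_count;
  int parameter_count;
};

// The returned pointer is valid only while {arena} is alive, exactly like a
// FunctionSig returned by GetSignature is valid only while its Zone is alive.
Signature* NewSignature(Arena* arena, int returns, int params) {
  void* mem = arena->Allocate(sizeof(Signature));
  return new (mem) Signature{returns, params};
}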
@@ -704,6 +726,34 @@ class WasmCapiFunction : public JSFunction {
OBJECT_CONSTRUCTORS(WasmCapiFunction, JSFunction);
};
+class WasmIndirectFunctionTable : public Struct {
+ public:
+ DECL_PRIMITIVE_ACCESSORS(size, uint32_t)
+ DECL_PRIMITIVE_ACCESSORS(sig_ids, uint32_t*)
+ DECL_PRIMITIVE_ACCESSORS(targets, Address*)
+ DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
+ DECL_ACCESSORS(refs, FixedArray)
+
+ V8_EXPORT_PRIVATE static Handle<WasmIndirectFunctionTable> New(
+ Isolate* isolate, uint32_t size);
+ static void Resize(Isolate* isolate, Handle<WasmIndirectFunctionTable> table,
+ uint32_t new_size);
+
+ DECL_CAST(WasmIndirectFunctionTable)
+
+ DECL_PRINTER(WasmIndirectFunctionTable)
+ DECL_VERIFIER(WasmIndirectFunctionTable)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WASM_INDIRECT_FUNCTION_TABLE_FIELDS)
+
+ STATIC_ASSERT(kStartOfStrongFieldsOffset == kManagedNativeAllocationsOffset);
+ using BodyDescriptor = FlexibleBodyDescriptor<kStartOfStrongFieldsOffset>;
+
+ OBJECT_CONSTRUCTORS(WasmIndirectFunctionTable, Struct);
+};
+
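
The new WasmIndirectFunctionTable above keeps parallel per-entry data of length {size}: a signature id array, a call target array, and a refs FixedArray. A simplified standalone sketch (stand-in types, not the real dispatch code) of the signature-check-then-dispatch such a table enables:

#include <cstdint>
#include <stdexcept>
#include <vector>

// Parallel per-entry arrays, as in WasmIndirectFunctionTable: entry i has a
// signature id and a call target (a raw code address in the real thing).
struct IndirectTable {
  std::vector<uint32_t> sig_ids;
  std::vector<uintptr_t> targets;
};

// An indirect call validates the entry's signature id before using its
// target; a mismatch corresponds to a signature-mismatch trap in wasm.
uintptr_t LookupTarget(const IndirectTable& table, uint32_t entry_index,
                       uint32_t expected_sig_id) {
  if (entry_index >= table.sig_ids.size())
    throw std::out_of_range("table index out of bounds");
  if (table.sig_ids[entry_index] != expected_sig_id)
    throw std::runtime_error("signature mismatch");
  return table.targets[entry_index];
}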
class WasmCapiFunctionData : public Struct {
public:
DECL_PRIMITIVE_ACCESSORS(call_target, Address)
@@ -734,6 +784,9 @@ class WasmExportedFunctionData : public Struct {
DECL_ACCESSORS(instance, WasmInstanceObject)
DECL_INT_ACCESSORS(jump_table_offset)
DECL_INT_ACCESSORS(function_index)
+ DECL_ACCESSORS(c_wrapper_code, Object)
+ DECL_ACCESSORS(wasm_call_target, Smi)
+ DECL_INT_ACCESSORS(packed_args_size)
DECL_CAST(WasmExportedFunctionData)
@@ -754,6 +807,10 @@ class WasmExportedFunctionData : public Struct {
// {SharedFunctionInfo::HasWasmJSFunctionData} predicate.
class WasmJSFunctionData : public Struct {
public:
+ DECL_INT_ACCESSORS(serialized_return_count)
+ DECL_INT_ACCESSORS(serialized_parameter_count)
+ DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
+ DECL_ACCESSORS(callable, JSReceiver)
DECL_ACCESSORS(wrapper_code, Code)
DECL_CAST(WasmJSFunctionData)
@@ -847,8 +904,8 @@ class WasmDebugInfo : public Struct {
Address frame_pointer,
int frame_index);
- V8_EXPORT_PRIVATE static Handle<JSFunction> GetCWasmEntry(
- Handle<WasmDebugInfo>, wasm::FunctionSig*);
+ V8_EXPORT_PRIVATE static Handle<Code> GetCWasmEntry(Handle<WasmDebugInfo>,
+ wasm::FunctionSig*);
OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct);
};
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 88b9e90381..d3fb4c42cf 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -23,7 +23,9 @@ namespace wasm {
#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
#define CASE_REF_OP(name, str) CASE_OP(Ref##name, "ref." str)
+#define CASE_F64x2_OP(name, str) CASE_OP(F64x2##name, "f64x2." str)
#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
+#define CASE_I64x2_OP(name, str) CASE_OP(I64x2##name, "i64x2." str)
#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
@@ -31,6 +33,7 @@ namespace wasm {
#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
#define CASE_S8x16_OP(name, str) CASE_OP(S8x16##name, "s8x16." str)
+#define CASE_S1x2_OP(name, str) CASE_OP(S1x2##name, "s1x2." str)
#define CASE_S1x4_OP(name, str) CASE_OP(S1x4##name, "s1x4." str)
#define CASE_S1x8_OP(name, str) CASE_OP(S1x8##name, "s1x8." str)
#define CASE_S1x16_OP(name, str) CASE_OP(S1x16##name, "s1x16." str)
@@ -148,8 +151,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(TeeLocal, "local.tee")
CASE_OP(GetGlobal, "global.get")
CASE_OP(SetGlobal, "global.set")
- CASE_OP(GetTable, "table.get")
- CASE_OP(SetTable, "table.set")
+ CASE_OP(TableGet, "table.get")
+ CASE_OP(TableSet, "table.set")
CASE_ALL_OP(Const, "const")
CASE_OP(MemorySize, "memory.size")
CASE_OP(MemoryGrow, "memory.grow")
@@ -217,11 +220,26 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// SIMD opcodes.
CASE_SIMD_OP(Splat, "splat")
CASE_SIMD_OP(Neg, "neg")
+ CASE_F64x2_OP(Neg, "neg")
+ CASE_I64x2_OP(Neg, "neg")
CASE_SIMD_OP(Eq, "eq")
+ CASE_F64x2_OP(Eq, "eq")
+ CASE_I64x2_OP(Eq, "eq")
CASE_SIMD_OP(Ne, "ne")
+ CASE_F64x2_OP(Ne, "ne")
+ CASE_I64x2_OP(Ne, "ne")
CASE_SIMD_OP(Add, "add")
+ CASE_I64x2_OP(Add, "add")
CASE_SIMD_OP(Sub, "sub")
+ CASE_I64x2_OP(Sub, "sub")
CASE_SIMD_OP(Mul, "mul")
+ CASE_I64x2_OP(Mul, "mul")
+ CASE_F64x2_OP(Splat, "splat")
+ CASE_F64x2_OP(Lt, "lt")
+ CASE_F64x2_OP(Le, "le")
+ CASE_F64x2_OP(Gt, "gt")
+ CASE_F64x2_OP(Ge, "ge")
+ CASE_F64x2_OP(Abs, "abs")
CASE_F32x4_OP(Abs, "abs")
CASE_F32x4_OP(AddHoriz, "add_horizontal")
CASE_F32x4_OP(RecipApprox, "recip_approx")
@@ -240,18 +258,29 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i32", "convert")
CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i32", "convert")
CASE_CONVERT_OP(Convert, I8x16, I16x8, "i32", "convert")
+ CASE_F64x2_OP(ExtractLane, "extract_lane")
+ CASE_F64x2_OP(ReplaceLane, "replace_lane")
CASE_F32x4_OP(ExtractLane, "extract_lane")
CASE_F32x4_OP(ReplaceLane, "replace_lane")
+ CASE_I64x2_OP(ExtractLane, "extract_lane")
+ CASE_I64x2_OP(ReplaceLane, "replace_lane")
CASE_SIMDI_OP(ExtractLane, "extract_lane")
CASE_SIMDI_OP(ReplaceLane, "replace_lane")
CASE_SIGN_OP(SIMDI, Min, "min")
CASE_SIGN_OP(SIMDI, Max, "max")
CASE_SIGN_OP(SIMDI, Lt, "lt")
+ CASE_SIGN_OP(I64x2, Lt, "lt")
CASE_SIGN_OP(SIMDI, Le, "le")
+ CASE_SIGN_OP(I64x2, Le, "le")
CASE_SIGN_OP(SIMDI, Gt, "gt")
+ CASE_SIGN_OP(I64x2, Gt, "gt")
CASE_SIGN_OP(SIMDI, Ge, "ge")
+ CASE_SIGN_OP(I64x2, Ge, "ge")
CASE_SIGN_OP(SIMDI, Shr, "shr")
+ CASE_SIGN_OP(I64x2, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
+ CASE_I64x2_OP(Shl, "shl")
+ CASE_I64x2_OP(Splat, "splat")
CASE_I32x4_OP(AddHoriz, "add_horizontal")
CASE_I16x8_OP(AddHoriz, "add_horizontal")
CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
@@ -264,6 +293,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(Not, "not")
CASE_S128_OP(Select, "select")
CASE_S8x16_OP(Shuffle, "shuffle")
+ CASE_S1x2_OP(AnyTrue, "any_true")
+ CASE_S1x2_OP(AllTrue, "all_true")
CASE_S1x4_OP(AnyTrue, "any_true")
CASE_S1x4_OP(AllTrue, "all_true")
CASE_S1x8_OP(AnyTrue, "any_true")
@@ -274,6 +305,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
CASE_INT_OP(AtomicWait, "atomic.wait")
+ CASE_OP(AtomicFence, "atomic.fence")
CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic.load")
CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic.store")
CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic.add")
@@ -295,7 +327,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_F32_OP
#undef CASE_F64_OP
#undef CASE_REF_OP
+#undef CASE_F64x2_OP
#undef CASE_F32x4_OP
+#undef CASE_I64x2_OP
#undef CASE_I32x4_OP
#undef CASE_I16x8_OP
#undef CASE_I8x16_OP
@@ -303,6 +337,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_S32x4_OP
#undef CASE_S16x8_OP
#undef CASE_S8x16_OP
+#undef CASE_S1x2_OP
#undef CASE_S1x4_OP
#undef CASE_S1x8_OP
#undef CASE_S1x16_OP
@@ -474,7 +509,8 @@ struct GetSimdOpcodeSigIndex {
struct GetAtomicOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_ATOMIC_OPCODE(CASE) kSigEnum_None;
+ return FOREACH_ATOMIC_OPCODE(CASE) FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE)
+ kSigEnum_None;
#undef CASE
}
};
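
The GetAtomicOpcodeSigIndex change above folds a second opcode list into the same CASE expansion. The trick is that each CASE instance contributes an "opcode == X ? value :" fragment and the chain terminates with a default, giving a constexpr table lookup. A self-contained demo of the pattern (the opcode numbers below are invented for the example):

#include <cstdint>

#define FOREACH_DEMO_OPCODE(V) \
  V(Add, 0x01, 7)              \
  V(Sub, 0x02, 8)

// Expands to: return opcode == (0x01) ? (7) : opcode == (0x02) ? (8) : 0;
constexpr int SigIndexFor(uint8_t opcode) {
#define CASE(name, opc, sig) opcode == (opc) ? (sig):
  return FOREACH_DEMO_OPCODE(CASE) 0;
#undef CASE
}

static_assert(SigIndexFor(0x02) == 8, "lookup resolves at compile time");
static_assert(SigIndexFor(0x99) == 0, "unknown opcodes fall through to 0");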
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 6f9cb70141..22bd47d54b 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -6,7 +6,7 @@
#define V8_WASM_WASM_OPCODES_H_
#include "src/common/globals.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-constants.h"
@@ -51,8 +51,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(TeeLocal, 0x22, _) \
V(GetGlobal, 0x23, _) \
V(SetGlobal, 0x24, _) \
- V(GetTable, 0x25, _) \
- V(SetTable, 0x26, _) \
+ V(TableGet, 0x25, _) \
+ V(TableSet, 0x26, _) \
V(I32Const, 0x41, _) \
V(I64Const, 0x42, _) \
V(F32Const, 0x43, _) \
@@ -272,7 +272,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I8x16Splat, 0xfd04, s_i) \
V(I16x8Splat, 0xfd08, s_i) \
V(I32x4Splat, 0xfd0c, s_i) \
+ V(I64x2Splat, 0xfd0f, s_l) \
V(F32x4Splat, 0xfd12, s_f) \
+ V(F64x2Splat, 0xfd15, s_d) \
V(I8x16Eq, 0xfd18, s_ss) \
V(I8x16Ne, 0xfd19, s_ss) \
V(I8x16LtS, 0xfd1a, s_ss) \
@@ -303,12 +305,28 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I32x4LeU, 0xfd33, s_ss) \
V(I32x4GeS, 0xfd34, s_ss) \
V(I32x4GeU, 0xfd35, s_ss) \
+ V(I64x2Eq, 0xfd36, s_ss) \
+ V(I64x2Ne, 0xfd37, s_ss) \
+ V(I64x2LtS, 0xfd38, s_ss) \
+ V(I64x2LtU, 0xfd39, s_ss) \
+ V(I64x2GtS, 0xfd3a, s_ss) \
+ V(I64x2GtU, 0xfd3b, s_ss) \
+ V(I64x2LeS, 0xfd3c, s_ss) \
+ V(I64x2LeU, 0xfd3d, s_ss) \
+ V(I64x2GeS, 0xfd3e, s_ss) \
+ V(I64x2GeU, 0xfd3f, s_ss) \
V(F32x4Eq, 0xfd40, s_ss) \
V(F32x4Ne, 0xfd41, s_ss) \
V(F32x4Lt, 0xfd42, s_ss) \
V(F32x4Gt, 0xfd43, s_ss) \
V(F32x4Le, 0xfd44, s_ss) \
V(F32x4Ge, 0xfd45, s_ss) \
+ V(F64x2Eq, 0xfd46, s_ss) \
+ V(F64x2Ne, 0xfd47, s_ss) \
+ V(F64x2Lt, 0xfd48, s_ss) \
+ V(F64x2Gt, 0xfd49, s_ss) \
+ V(F64x2Le, 0xfd4a, s_ss) \
+ V(F64x2Ge, 0xfd4b, s_ss) \
V(S128Not, 0xfd4c, s_s) \
V(S128And, 0xfd4d, s_ss) \
V(S128Or, 0xfd4e, s_ss) \
@@ -352,6 +370,12 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I32x4MinU, 0xfd81, s_ss) \
V(I32x4MaxS, 0xfd82, s_ss) \
V(I32x4MaxU, 0xfd83, s_ss) \
+ V(I64x2Neg, 0xfd84, s_s) \
+ V(S1x2AnyTrue, 0xfd85, i_s) \
+ V(S1x2AllTrue, 0xfd86, i_s) \
+ V(I64x2Add, 0xfd8a, s_ss) \
+ V(I64x2Sub, 0xfd8d, s_ss) \
+ V(I64x2Mul, 0xfd8c, s_ss) \
V(F32x4Abs, 0xfd95, s_s) \
V(F32x4Neg, 0xfd96, s_s) \
V(F32x4RecipApprox, 0xfd98, s_s) \
@@ -361,6 +385,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(F32x4Mul, 0xfd9c, s_ss) \
V(F32x4Min, 0xfd9e, s_ss) \
V(F32x4Max, 0xfd9f, s_ss) \
+ V(F64x2Abs, 0xfda0, s_s) \
+ V(F64x2Neg, 0xfda1, s_s) \
V(I32x4SConvertF32x4, 0xfdab, s_s) \
V(I32x4UConvertF32x4, 0xfdac, s_s) \
V(F32x4SConvertI32x4, 0xfdaf, s_s) \
@@ -385,7 +411,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I8x16ExtractLane, 0xfd05, _) \
V(I16x8ExtractLane, 0xfd09, _) \
V(I32x4ExtractLane, 0xfd0d, _) \
+ V(I64x2ExtractLane, 0xfd10, _) \
V(F32x4ExtractLane, 0xfd13, _) \
+ V(F64x2ExtractLane, 0xfd16, _) \
V(I8x16Shl, 0xfd54, _) \
V(I8x16ShrS, 0xfd55, _) \
V(I8x16ShrU, 0xfd56, _) \
@@ -394,13 +422,18 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I16x8ShrU, 0xfd67, _) \
V(I32x4Shl, 0xfd76, _) \
V(I32x4ShrS, 0xfd77, _) \
- V(I32x4ShrU, 0xfd78, _)
+ V(I32x4ShrU, 0xfd78, _) \
+ V(I64x2Shl, 0xfd87, _) \
+ V(I64x2ShrS, 0xfd88, _) \
+ V(I64x2ShrU, 0xfd89, _)
#define FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
V(I8x16ReplaceLane, 0xfd07, _) \
V(I16x8ReplaceLane, 0xfd0b, _) \
V(I32x4ReplaceLane, 0xfd0e, _) \
- V(F32x4ReplaceLane, 0xfd14, _)
+ V(I64x2ReplaceLane, 0xfd11, _) \
+ V(F32x4ReplaceLane, 0xfd14, _) \
+ V(F64x2ReplaceLane, 0xfd17, _)
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
@@ -424,7 +457,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(TableCopy, 0xfc0e, v_iii) \
V(TableGrow, 0xfc0f, i_ai) \
V(TableSize, 0xfc10, i_v) \
- /*TableFill is polymorph in the second parameter. It's anyref or anyfunc.*/ \
+  /* TableFill is polymorphic in its second parameter: it's anyref or funcref. */ \
V(TableFill, 0xfc11, v_iii)
#define FOREACH_ATOMIC_OPCODE(V) \
@@ -495,6 +528,10 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I64AtomicCompareExchange16U, 0xfe4d, l_ill) \
V(I64AtomicCompareExchange32U, 0xfe4e, l_ill)
+#define FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
+ /* AtomicFence does not target a particular linear memory. */ \
+ V(AtomicFence, 0xfe03, v_v)
+
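
A hedged reading of the two-byte opcode values in these tables (for example 0xfd15 for F64x2Splat and 0xfe03 for AtomicFence): the high byte appears to be the prefix byte (0xfd for SIMD, 0xfe for atomics) and the low byte the sub-opcode, which is consistent with the (opc & 0xFF) masking in GetAtomicOpcodeSigIndex above. A small sketch of splitting such a packed value (the names are mine, not V8's):

#include <cstdint>

struct PrefixedOpcode {
  uint8_t prefix;      // 0xfd (SIMD), 0xfe (atomics), 0xfc (numeric), ...
  uint8_t sub_opcode;  // the byte that follows the prefix in the wire format
};

constexpr PrefixedOpcode Split(uint16_t packed) {
  return {static_cast<uint8_t>(packed >> 8),
          static_cast<uint8_t>(packed & 0xFF)};
}

static_assert(Split(0xfd15).prefix == 0xfd, "SIMD prefix byte");
static_assert(Split(0xfe03).sub_opcode == 0x03, "atomic.fence sub-opcode");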
// All opcodes.
#define FOREACH_OPCODE(V) \
FOREACH_CONTROL_OPCODE(V) \
@@ -510,6 +547,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
FOREACH_SIMD_MEM_OPCODE(V) \
FOREACH_ATOMIC_OPCODE(V) \
+ FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
FOREACH_NUMERIC_OPCODE(V)
// All signatures.
@@ -553,13 +591,15 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
V(i_r, kWasmI32, kWasmAnyRef) \
- V(i_ai, kWasmI32, kWasmAnyFunc, kWasmI32)
+ V(i_ai, kWasmI32, kWasmFuncRef, kWasmI32)
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kWasmS128, kWasmS128) \
V(s_f, kWasmS128, kWasmF32) \
+ V(s_d, kWasmS128, kWasmF64) \
V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
V(s_i, kWasmS128, kWasmI32) \
+ V(s_l, kWasmS128, kWasmI64) \
V(s_si, kWasmS128, kWasmS128, kWasmI32) \
V(i_s, kWasmI32, kWasmS128) \
V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 4688bcf8e1..42eee037d5 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -18,28 +18,28 @@ namespace wasm {
namespace {
PRINTF_FORMAT(3, 0)
-void VPrintFToString(std::string& str, size_t str_offset, const char* format,
+void VPrintFToString(std::string* str, size_t str_offset, const char* format,
va_list args) {
- DCHECK_LE(str_offset, str.size());
+ DCHECK_LE(str_offset, str->size());
size_t len = str_offset + strlen(format);
// Allocate increasingly large buffers until the message fits.
for (;; len = base::bits::RoundUpToPowerOfTwo64(len + 1)) {
DCHECK_GE(kMaxInt, len);
- str.resize(len);
+ str->resize(len);
va_list args_copy;
va_copy(args_copy, args);
- int written = VSNPrintF(Vector<char>(&str.front() + str_offset,
+ int written = VSNPrintF(Vector<char>(&str->front() + str_offset,
static_cast<int>(len - str_offset)),
format, args_copy);
va_end(args_copy);
if (written < 0) continue; // not enough space.
- str.resize(str_offset + written);
+ str->resize(str_offset + written);
return;
}
}
PRINTF_FORMAT(3, 4)
-void PrintFToString(std::string& str, size_t str_offset, const char* format,
+void PrintFToString(std::string* str, size_t str_offset, const char* format,
...) {
va_list args;
va_start(args, format);
@@ -52,7 +52,7 @@ void PrintFToString(std::string& str, size_t str_offset, const char* format,
// static
std::string WasmError::FormatError(const char* format, va_list args) {
std::string result;
- VPrintFToString(result, 0, format, args);
+ VPrintFToString(&result, 0, format, args);
return result;
}
@@ -63,10 +63,10 @@ void ErrorThrower::Format(ErrorType type, const char* format, va_list args) {
size_t context_len = 0;
if (context_) {
- PrintFToString(error_msg_, 0, "%s: ", context_);
+ PrintFToString(&error_msg_, 0, "%s: ", context_);
context_len = error_msg_.size();
}
- VPrintFToString(error_msg_, context_len, format, args);
+ VPrintFToString(&error_msg_, context_len, format, args);
error_type_ = type;
}
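
The wasm-result.cc hunks switch the output parameter of VPrintFToString/PrintFToString from a std::string reference to a pointer while keeping the grow-until-it-fits formatting loop; the loop is needed because V8's VSNPrintF signals truncation with a negative result. With standard vsnprintf the required size can instead be measured up front, as in this simplified standalone sketch (AppendFormatted is an invented name):

#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <string>

// Appends printf-style formatted text to *str, mirroring the pointer-style
// output parameter used above. Two passes: measure, then format in place.
void AppendFormatted(std::string* str, const char* format, ...) {
  std::size_t offset = str->size();
  va_list args;
  va_start(args, format);
  int needed = std::vsnprintf(nullptr, 0, format, args);  // measure only
  va_end(args);
  if (needed < 0) return;  // encoding error: leave the string untouched
  str->resize(offset + static_cast<std::size_t>(needed) + 1);  // room incl. '\0'
  va_start(args, format);
  std::vsnprintf(&(*str)[offset], static_cast<std::size_t>(needed) + 1, format,
                 args);
  va_end(args);
  str->resize(offset + static_cast<std::size_t>(needed));  // drop the '\0'
}

// Usage: std::string msg; AppendFormatted(&msg, "%s: %d", "context", 42);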
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 1cea08943b..a20b2f115a 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -645,6 +645,8 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
// Log the code within the generated module for profiling.
native_module->LogWasmCodes(isolate);
+ // Finish the Wasm script now and make it public to the debugger.
+ isolate->debug()->OnAfterCompile(script);
return module_object;
}
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index a79ae02fe2..e17d34e36f 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -105,7 +105,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
os << WasmOpcodes::OpcodeName(opcode);
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
os << " (type " << imm.sig_index << ")";
} else if (imm.out_arity() > 0) {
os << " " << ValueTypes::TypeName(imm.out_type(0));
@@ -140,16 +140,18 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
while (iterator.has_next()) os << ' ' << iterator.next();
break;
}
- case kExprCallIndirect: {
+ case kExprCallIndirect:
+ case kExprReturnCallIndirect: {
CallIndirectImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
DCHECK_EQ(0, imm.table_index);
- os << "call_indirect " << imm.sig_index;
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.sig_index;
break;
}
- case kExprCallFunction: {
+ case kExprCallFunction:
+ case kExprReturnCall: {
CallFunctionImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- os << "call " << imm.index;
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
case kExprGetLocal:
@@ -170,6 +172,18 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
+ case kExprTableGet:
+ case kExprTableSet: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ case kExprSelectWithType: {
+ SelectTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' '
+ << ValueTypes::TypeName(imm.type);
+ break;
+ }
#define CASE_CONST(type, str, cast_type) \
case kExpr##type##Const: { \
Imm##type##Immediate<Decoder::kNoValidate> imm(&i, i.pc()); \
@@ -182,6 +196,12 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
CASE_CONST(F64, f64, double)
#undef CASE_CONST
+ case kExprRefFunc: {
+ FunctionIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+
#define CASE_OPCODE(opcode, _, __) case kExpr##opcode:
FOREACH_LOAD_MEM_OPCODE(CASE_OPCODE)
FOREACH_STORE_MEM_OPCODE(CASE_OPCODE) {
@@ -193,6 +213,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
}
FOREACH_SIMPLE_OPCODE(CASE_OPCODE)
+ FOREACH_SIMPLE_PROTOTYPE_OPCODE(CASE_OPCODE)
case kExprUnreachable:
case kExprNop:
case kExprReturn:
@@ -200,19 +221,150 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
case kExprMemoryGrow:
case kExprDrop:
case kExprSelect:
+ case kExprRethrow:
+ case kExprRefNull:
os << WasmOpcodes::OpcodeName(opcode);
break;
+
+ case kNumericPrefix: {
+ WasmOpcode numeric_opcode = i.prefixed_opcode();
+ switch (numeric_opcode) {
+ case kExprI32SConvertSatF32:
+ case kExprI32UConvertSatF32:
+ case kExprI32SConvertSatF64:
+ case kExprI32UConvertSatF64:
+ case kExprI64SConvertSatF32:
+ case kExprI64UConvertSatF32:
+ case kExprI64SConvertSatF64:
+ case kExprI64UConvertSatF64:
+ case kExprMemoryCopy:
+ case kExprMemoryFill:
+ os << WasmOpcodes::OpcodeName(opcode);
+ break;
+ case kExprMemoryInit: {
+ MemoryInitImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' '
+ << imm.data_segment_index;
+ break;
+ }
+ case kExprDataDrop: {
+ DataDropImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ case kExprTableInit: {
+ TableInitImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' '
+ << imm.elem_segment_index << ' ' << imm.table.index;
+ break;
+ }
+ case kExprElemDrop: {
+ ElemDropImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ case kExprTableCopy: {
+ TableCopyImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.table_src.index
+ << ' ' << imm.table_dst.index;
+ break;
+ }
+ case kExprTableGrow:
+ case kExprTableSize:
+ case kExprTableFill: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+
+ case kSimdPrefix: {
+ WasmOpcode simd_opcode = i.prefixed_opcode();
+ switch (simd_opcode) {
+ case kExprS128LoadMem:
+ case kExprS128StoreMem: {
+ MemoryAccessImmediate<Decoder::kNoValidate> imm(&i, i.pc(),
+ kMaxUInt32);
+ os << WasmOpcodes::OpcodeName(opcode) << " offset=" << imm.offset
+ << " align=" << (1ULL << imm.alignment);
+ break;
+ }
+
+ case kExprS8x16Shuffle: {
+ Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode);
+ for (uint8_t v : imm.shuffle) {
+ os << ' ' << v;
+ }
+ break;
+ }
+
+ case kExprI8x16ExtractLane:
+ case kExprI16x8ExtractLane:
+ case kExprI32x4ExtractLane:
+ case kExprI64x2ExtractLane:
+ case kExprF32x4ExtractLane:
+ case kExprF64x2ExtractLane:
+ case kExprI8x16ReplaceLane:
+ case kExprI16x8ReplaceLane:
+ case kExprI32x4ReplaceLane:
+ case kExprI64x2ReplaceLane:
+ case kExprF32x4ReplaceLane:
+ case kExprF64x2ReplaceLane: {
+ SimdLaneImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.lane;
+ break;
+ }
+
+ case kExprI8x16Shl:
+ case kExprI8x16ShrS:
+ case kExprI8x16ShrU:
+ case kExprI16x8Shl:
+ case kExprI16x8ShrS:
+ case kExprI16x8ShrU:
+ case kExprI32x4Shl:
+ case kExprI32x4ShrS:
+ case kExprI32x4ShrU:
+ case kExprI64x2Shl:
+ case kExprI64x2ShrS:
+ case kExprI64x2ShrU: {
+ SimdShiftImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.shift;
+ break;
+ }
+
+ FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE) {
+ os << WasmOpcodes::OpcodeName(opcode);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+
case kAtomicPrefix: {
WasmOpcode atomic_opcode = i.prefixed_opcode();
switch (atomic_opcode) {
FOREACH_ATOMIC_OPCODE(CASE_OPCODE) {
- MemoryAccessImmediate<Decoder::kNoValidate> imm(&i, i.pc(),
+ MemoryAccessImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1,
kMaxUInt32);
os << WasmOpcodes::OpcodeName(atomic_opcode)
<< " offset=" << imm.offset
<< " align=" << (1ULL << imm.alignment);
break;
}
+ FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE_OPCODE) {
+ os << WasmOpcodes::OpcodeName(atomic_opcode);
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -222,14 +374,9 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
// This group is just printed by their internal opcode name, as they
// should never be shown to end-users.
- FOREACH_ASMJS_COMPAT_OPCODE(CASE_OPCODE)
- // TODO(wasm): Add correct printing for SIMD and atomic opcodes once
- // they are publicly available.
- FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE)
- FOREACH_SIMD_1_OPERAND_OPCODE(CASE_OPCODE)
- FOREACH_SIMD_MASK_OPERAND_OPCODE(CASE_OPCODE)
- FOREACH_SIMD_MEM_OPCODE(CASE_OPCODE)
- os << WasmOpcodes::OpcodeName(opcode);
+ FOREACH_ASMJS_COMPAT_OPCODE(CASE_OPCODE) {
+ os << WasmOpcodes::OpcodeName(opcode);
+ }
break;
#undef CASE_OPCODE
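
In the memory-access cases above the disassembler prints "align=" as (1ULL << imm.alignment): wasm memory immediates store the alignment as a log2 exponent, while the byte offset is printed as-is. A tiny standalone sketch of turning decoded fields into that printed form (MemArg/PrintMemArg are illustrative names):

#include <cstdint>
#include <iostream>

struct MemArg {
  uint32_t alignment;  // log2 of the byte alignment, e.g. 2 means 4 bytes
  uint32_t offset;     // constant byte offset added to the address operand
};

void PrintMemArg(const MemArg& imm) {
  std::cout << " offset=" << imm.offset
            << " align=" << (1ULL << imm.alignment) << "\n";
}

// PrintMemArg({2, 16}) prints " offset=16 align=4".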
diff --git a/deps/v8/src/wasm/wasm-text.h b/deps/v8/src/wasm/wasm-text.h
index 60957966ab..205df5e6fd 100644
--- a/deps/v8/src/wasm/wasm-text.h
+++ b/deps/v8/src/wasm/wasm-text.h
@@ -7,9 +7,10 @@
#include <cstdint>
#include <ostream>
-#include <tuple>
#include <vector>
+#include "src/common/globals.h"
+
namespace v8 {
namespace debug {
@@ -26,10 +27,10 @@ struct ModuleWireBytes;
// Generate disassembly according to official text format.
// Output disassembly to the given output stream, and optionally return an
// offset table of <byte offset, line, column> via the given pointer.
-void PrintWasmText(
- const WasmModule *module, const ModuleWireBytes &wire_bytes,
- uint32_t func_index, std::ostream &os,
- std::vector<debug::WasmDisassemblyOffsetTableEntry> *offset_table);
+V8_EXPORT_PRIVATE void PrintWasmText(
+ const WasmModule* module, const ModuleWireBytes& wire_bytes,
+ uint32_t func_index, std::ostream& os,
+ std::vector<debug::WasmDisassemblyOffsetTableEntry>* offset_table);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 23f1aed7f0..8de53b96cf 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_WASM_VALUE_H_
#define V8_WASM_WASM_VALUE_H_
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/handles/handles.h"
#include "src/utils/boxed-float.h"
#include "src/wasm/wasm-opcodes.h"
@@ -15,10 +15,12 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define FOREACH_SIMD_TYPE(V) \
- V(float, float4, f32x4, 4) \
- V(int32_t, int4, i32x4, 4) \
- V(int16_t, int8, i16x8, 8) \
+#define FOREACH_SIMD_TYPE(V) \
+ V(double, float2, f64x2, 2) \
+ V(float, float4, f32x4, 4) \
+ V(int64_t, int2, i64x2, 2) \
+ V(int32_t, int4, i32x4, 4) \
+ V(int16_t, int8, i16x8, 8) \
V(int8_t, int16, i8x16, 16)
#define DEFINE_SIMD_TYPE(cType, sType, name, kSize) \
@@ -35,12 +37,12 @@ class Simd128 {
val_[i] = 0;
}
}
-#define DEFINE_SIMD_TYPE_SPECIFIC_METHODS(cType, sType, name, size) \
- explicit Simd128(sType val) { \
- WriteUnalignedValue<sType>(reinterpret_cast<Address>(val_), val); \
- } \
- sType to_##name() { \
- return ReadUnalignedValue<sType>(reinterpret_cast<Address>(val_)); \
+#define DEFINE_SIMD_TYPE_SPECIFIC_METHODS(cType, sType, name, size) \
+ explicit Simd128(sType val) { \
+ base::WriteUnalignedValue<sType>(reinterpret_cast<Address>(val_), val); \
+ } \
+ sType to_##name() { \
+ return base::ReadUnalignedValue<sType>(reinterpret_cast<Address>(val_)); \
}
FOREACH_SIMD_TYPE(DEFINE_SIMD_TYPE_SPECIFIC_METHODS)
#undef DEFINE_SIMD_TYPE_SPECIFIC_METHODS
@@ -73,18 +75,20 @@ class WasmValue {
public:
WasmValue() : type_(kWasmStmt), bit_pattern_{} {}
-#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype) \
- explicit WasmValue(ctype v) : type_(localtype), bit_pattern_{} { \
- static_assert(sizeof(ctype) <= sizeof(bit_pattern_), \
- "size too big for WasmValue"); \
- WriteUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_), v); \
- } \
- ctype to_##name() const { \
- DCHECK_EQ(localtype, type_); \
- return to_##name##_unchecked(); \
- } \
- ctype to_##name##_unchecked() const { \
- return ReadUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_)); \
+#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype) \
+ explicit WasmValue(ctype v) : type_(localtype), bit_pattern_{} { \
+ static_assert(sizeof(ctype) <= sizeof(bit_pattern_), \
+ "size too big for WasmValue"); \
+ base::WriteUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_), \
+ v); \
+ } \
+ ctype to_##name() const { \
+ DCHECK_EQ(localtype, type_); \
+ return to_##name##_unchecked(); \
+ } \
+ ctype to_##name##_unchecked() const { \
+ return base::ReadUnalignedValue<ctype>( \
+ reinterpret_cast<Address>(bit_pattern_)); \
}
FOREACH_WASMVAL_TYPE(DEFINE_TYPE_SPECIFIC_METHODS)
#undef DEFINE_TYPE_SPECIFIC_METHODS
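
wasm-value.h now pulls ReadUnalignedValue/WriteUnalignedValue from src/base/memory.h (the renamed v8memory.h) to move values in and out of the raw bit_pattern_ bytes without alignment assumptions. A standalone sketch of the same memcpy-based technique (not the real base:: helpers):

#include <cstdint>
#include <cstring>
#include <type_traits>

// Writes/reads a trivially copyable value at an arbitrary (possibly
// unaligned) byte address by going through memcpy.
template <typename T>
void WriteUnaligned(void* dest, T value) {
  static_assert(std::is_trivially_copyable<T>::value, "raw byte copy only");
  std::memcpy(dest, &value, sizeof(T));
}

template <typename T>
T ReadUnaligned(const void* src) {
  static_assert(std::is_trivially_copyable<T>::value, "raw byte copy only");
  T value;
  std::memcpy(&value, src, sizeof(T));
  return value;
}

// A WasmValue-style bit pattern: 16 bytes holds any scalar or a 128-bit SIMD
// value.
//   uint8_t bits[16] = {};
//   WriteUnaligned<double>(bits, 1.5);
//   double d = ReadUnaligned<double>(bits);  // d == 1.5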
diff --git a/deps/v8/src/zone/OWNERS b/deps/v8/src/zone/OWNERS
new file mode 100644
index 0000000000..01c515ab90
--- /dev/null
+++ b/deps/v8/src/zone/OWNERS
@@ -0,0 +1,3 @@
+clemensh@chromium.org
+sigurds@chromium.org
+verwaest@chromium.org
diff --git a/deps/v8/src/zone/zone-allocator.h b/deps/v8/src/zone/zone-allocator.h
index fe62d4bb4c..69928d5925 100644
--- a/deps/v8/src/zone/zone-allocator.h
+++ b/deps/v8/src/zone/zone-allocator.h
@@ -26,8 +26,18 @@ class ZoneAllocator {
using other = ZoneAllocator<O>;
};
-#ifdef V8_CC_MSVC
- // MSVS unfortunately requires the default constructor to be defined.
+#ifdef V8_OS_WIN
+ // The exported class ParallelMove derives from ZoneVector, which derives
+ // from std::vector. On Windows, the semantics of dllexport mean that
+ // a class's superclasses that are not explicitly exported themselves get
+ // implicitly exported together with the subclass, and exporting a class
+ // exports all its functions -- including the std::vector() constructors
+ // that don't take an explicit allocator argument, which in turn reference
+ // the vector allocator's default constructor. So this constructor needs
+ // to exist for linking purposes, even if it's never called.
+  // Other fixes would be to disallow subclasses of ZoneVector (etc) from being
+  // exported, or to use composition instead of inheritance for either
+  // ZoneVector and friends or for ParallelMove.
ZoneAllocator() : ZoneAllocator(nullptr) { UNREACHABLE(); }
#endif
explicit ZoneAllocator(Zone* zone) : zone_(zone) {}
@@ -37,14 +47,8 @@ class ZoneAllocator {
template <typename U>
friend class ZoneAllocator;
- T* address(T& x) const { return &x; }
- const T* address(const T& x) const { return &x; }
-
- T* allocate(size_t n, const void* hint = nullptr) {
- return static_cast<T*>(zone_->NewArray<T>(static_cast<int>(n)));
- }
- void deallocate(T* p, size_t) { /* noop for Zones */
- }
+ T* allocate(size_t n) { return zone_->NewArray<T>(n); }
+ void deallocate(T* p, size_t) {} // noop for zones
size_t max_size() const {
return std::numeric_limits<int>::max() / sizeof(T);
@@ -84,13 +88,6 @@ class RecyclingZoneAllocator : public ZoneAllocator<T> {
using other = RecyclingZoneAllocator<O>;
};
-#ifdef V8_CC_MSVC
- // MSVS unfortunately requires the default constructor to be defined.
- RecyclingZoneAllocator()
- : ZoneAllocator(nullptr, nullptr), free_list_(nullptr) {
- UNREACHABLE();
- }
-#endif
explicit RecyclingZoneAllocator(Zone* zone)
: ZoneAllocator<T>(zone), free_list_(nullptr) {}
template <typename U>
@@ -100,16 +97,15 @@ class RecyclingZoneAllocator : public ZoneAllocator<T> {
template <typename U>
friend class RecyclingZoneAllocator;
- T* allocate(size_t n, const void* hint = nullptr) {
+ T* allocate(size_t n) {
// Only check top block in free list, since this will be equal to or larger
// than the other blocks in the free list.
if (free_list_ && free_list_->size >= n) {
T* return_val = reinterpret_cast<T*>(free_list_);
free_list_ = free_list_->next;
return return_val;
- } else {
- return ZoneAllocator<T>::allocate(n, hint);
}
+ return ZoneAllocator<T>::allocate(n);
}
void deallocate(T* p, size_t n) {
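
The zone-allocator.h hunks drop address() and the unused allocation hint, leaving the minimal interface C++11 containers actually need; deallocate stays a no-op because a Zone releases everything at once. A standalone sketch of such a minimal allocator, here simply backed by ::operator new rather than a zone:

#include <cstddef>
#include <new>
#include <vector>

template <typename T>
struct MinimalAllocator {
  using value_type = T;

  MinimalAllocator() = default;
  template <typename U>
  MinimalAllocator(const MinimalAllocator<U>&) {}

  T* allocate(std::size_t n) {
    return static_cast<T*>(::operator new(n * sizeof(T)));
  }
  void deallocate(T* p, std::size_t) { ::operator delete(p); }
};

template <typename T, typename U>
bool operator==(const MinimalAllocator<T>&, const MinimalAllocator<U>&) {
  return true;
}
template <typename T, typename U>
bool operator!=(const MinimalAllocator<T>&, const MinimalAllocator<U>&) {
  return false;
}

// Usage: std::vector<int, MinimalAllocator<int>> v; v.push_back(1);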
diff --git a/deps/v8/src/zone/zone-splay-tree.h b/deps/v8/src/zone/zone-splay-tree.h
deleted file mode 100644
index c28df38fda..0000000000
--- a/deps/v8/src/zone/zone-splay-tree.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ZONE_ZONE_SPLAY_TREE_H_
-#define V8_ZONE_ZONE_SPLAY_TREE_H_
-
-#include "src/utils/splay-tree.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// A zone splay tree. The config type parameter encapsulates the
-// different configurations of a concrete splay tree (see splay-tree.h).
-// The tree itself and all its elements are allocated in the Zone.
-template <typename Config>
-class ZoneSplayTree final : public SplayTree<Config, ZoneAllocationPolicy> {
- public:
- explicit ZoneSplayTree(Zone* zone)
- : SplayTree<Config, ZoneAllocationPolicy>(ZoneAllocationPolicy(zone)) {}
- ~ZoneSplayTree() {
- // Reset the root to avoid unneeded iteration over all tree nodes
- // in the destructor. For a zone-allocated tree, nodes will be
- // freed by the Zone.
- SplayTree<Config, ZoneAllocationPolicy>::ResetRoot();
- }
-
- void* operator new(size_t size, Zone* zone) { return zone->New(size); }
-
- void operator delete(void* pointer) { UNREACHABLE(); }
- void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ZONE_ZONE_SPLAY_TREE_H_
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index a6f45fad54..81fc9c7d8b 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -27,8 +27,7 @@ constexpr size_t kASanRedzoneBytes = 0;
} // namespace
-Zone::Zone(AccountingAllocator* allocator, const char* name,
- SegmentSize segment_size)
+Zone::Zone(AccountingAllocator* allocator, const char* name)
: allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
@@ -36,8 +35,7 @@ Zone::Zone(AccountingAllocator* allocator, const char* name,
allocator_(allocator),
segment_head_(nullptr),
name_(name),
- sealed_(false),
- segment_size_(segment_size) {
+ sealed_(false) {
allocator_->ZoneCreation(this);
}
@@ -137,12 +135,9 @@ Address Zone::NewExpand(size_t size) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
- if (segment_size_ == SegmentSize::kLarge) {
- new_size = kMaximumSegmentSize;
- }
if (new_size < kMinimumSegmentSize) {
new_size = kMinimumSegmentSize;
- } else if (new_size > kMaximumSegmentSize) {
+ } else if (new_size >= kMaximumSegmentSize) {
// Limit the size of new segments to avoid growing the segment size
// exponentially, thus putting pressure on contiguous virtual address space.
// All the while making sure to allocate a segment large enough to hold the
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index b113f49585..e2b66253f5 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -37,12 +37,9 @@ namespace internal {
// Note: The implementation is inherently not thread safe. Do not use
// from multi-threaded code.
-enum class SegmentSize { kLarge, kDefault };
-
class V8_EXPORT_PRIVATE Zone final {
public:
- Zone(AccountingAllocator* allocator, const char* name,
- SegmentSize segment_size = SegmentSize::kDefault);
+ Zone(AccountingAllocator* allocator, const char* name);
~Zone();
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -102,7 +99,7 @@ class V8_EXPORT_PRIVATE Zone final {
static const size_t kMinimumSegmentSize = 8 * KB;
// Never allocate segments larger than this size in bytes.
- static const size_t kMaximumSegmentSize = 1 * MB;
+ static const size_t kMaximumSegmentSize = 32 * KB;
// Report zone excess when allocation exceeds this limit.
static const size_t kExcessLimit = 256 * MB;
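
The zone.cc hunk changes the upper-bound comparison to >= and this zone.h hunk drops the segment cap from 1 MB to 32 KB; the comment explains the cap exists to avoid exponential segment growth while still satisfying an oversized request (the tail of that comment is cut off by the hunk). A hedged standalone sketch of the clamping policy, with the oversized-request handling assumed rather than quoted:

#include <algorithm>
#include <cstddef>

constexpr std::size_t kMinimumSegmentSize = 8 * 1024;
constexpr std::size_t kMaximumSegmentSize = 32 * 1024;  // per the zone.h change

// {proposed} is the geometrically grown candidate size; {required} is what
// the current allocation actually needs (assumed, since the hunk cuts off).
std::size_t NextSegmentSize(std::size_t proposed, std::size_t required) {
  std::size_t new_size = proposed;
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size >= kMaximumSegmentSize) {
    // Cap growth, but never return less than the request needs.
    new_size = std::max(required, kMaximumSegmentSize);
  }
  return new_size;
}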
@@ -136,7 +133,6 @@ class V8_EXPORT_PRIVATE Zone final {
Segment* segment_head_;
const char* name_;
bool sealed_;
- SegmentSize segment_size_;
};
// ZoneObject is an abstraction that helps define classes of objects